Merge "Make grpc lib available for recovery" into main
diff --git a/Android.bp b/Android.bp
index b05496d..0bd4ca3 100644
--- a/Android.bp
+++ b/Android.bp
@@ -31,18 +31,30 @@
         "absl/log/internal/test_matchers.cc",
         "absl/log/scoped_mock_log.cc",
         "absl/random/internal/gaussian_distribution_gentables.cc",
+        "absl/status/internal/status_matchers.cc",
     ],
     export_include_dirs: ["."],
     shared_libs: [
         "liblog",
     ],
     stl: "libc++",
+    sdk_version: "current",
+    min_sdk_version: "apex_inherit",
     apex_available: [
         "//apex_available:platform",
+        "com.android.adservices",
+        "com.android.extservices",
+        "com.android.ondevicepersonalization",
     ],
     visibility: [
+        "//external/federated-compute:__subpackages__",
         "//external/grpc-grpc:__subpackages__",
+        "//external/libtextclassifier:__subpackages__",
         "//external/kythe:__subpackages__",
+        "//external/tensorflow:__subpackages__",
+        "//external/tflite-support:__subpackages__",
+        "//external/webrtc:__subpackages__",
+        "//frameworks/av/media/libeffects/preprocessing",
     ],
 }
 
diff --git a/BUILD.bazel b/BUILD.bazel
index 79fb0ec..03122a9 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -1,4 +1,3 @@
-#
 # Copyright 2020 The Abseil Authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -23,3 +22,14 @@
     "AUTHORS",
     "LICENSE",
 ])
+
+# For building with clang-cl.
+# https://bazel.build/configure/windows#clang
+platform(
+    name = "x64_windows-clang-cl",
+    constraint_values = [
+        "@platforms//cpu:x86_64",
+        "@platforms//os:windows",
+        "@bazel_tools//tools/cpp:clang-cl",
+    ],
+)
diff --git a/CMake/AbseilDll.cmake b/CMake/AbseilDll.cmake
index 47f3bee..32cc28f 100644
--- a/CMake/AbseilDll.cmake
+++ b/CMake/AbseilDll.cmake
@@ -28,6 +28,8 @@
   "base/internal/low_level_scheduling.h"
   "base/internal/nullability_impl.h"
   "base/internal/per_thread_tls.h"
+  "base/internal/poison.cc"
+  "base/internal/poison.h"
   "base/prefetch.h"
   "base/internal/pretty_function.h"
   "base/internal/raw_logging.cc"
@@ -66,6 +68,7 @@
   "cleanup/internal/cleanup.h"
   "container/btree_map.h"
   "container/btree_set.h"
+  "container/hash_container_defaults.h"
   "container/fixed_array.h"
   "container/flat_hash_map.h"
   "container/flat_hash_set.h"
@@ -121,8 +124,13 @@
   "debugging/symbolize.h"
   "debugging/internal/address_is_readable.cc"
   "debugging/internal/address_is_readable.h"
+  "debugging/internal/bounded_utf8_length_sequence.h"
+  "debugging/internal/decode_rust_punycode.cc"
+  "debugging/internal/decode_rust_punycode.h"
   "debugging/internal/demangle.cc"
   "debugging/internal/demangle.h"
+  "debugging/internal/demangle_rust.cc"
+  "debugging/internal/demangle_rust.h"
   "debugging/internal/elf_mem_image.cc"
   "debugging/internal/elf_mem_image.h"
   "debugging/internal/examine_stack.cc"
@@ -131,6 +139,8 @@
   "debugging/internal/stack_consumption.h"
   "debugging/internal/stacktrace_config.h"
   "debugging/internal/symbolize.h"
+  "debugging/internal/utf8_for_code_point.cc"
+  "debugging/internal/utf8_for_code_point.h"
   "debugging/internal/vdso_support.cc"
   "debugging/internal/vdso_support.h"
   "functional/any_invocable.h"
@@ -311,7 +321,6 @@
   "strings/internal/string_constant.h"
   "strings/internal/stringify_sink.h"
   "strings/internal/stringify_sink.cc"
-  "strings/internal/has_absl_stringify.h"
   "strings/has_absl_stringify.h"
   "strings/has_ostream_operator.h"
   "strings/match.cc"
@@ -437,9 +446,47 @@
   "debugging/leak_check.cc"
 )
 
+if(NOT MSVC)
+  list(APPEND ABSL_INTERNAL_DLL_FILES
+    "flags/commandlineflag.cc"
+    "flags/commandlineflag.h"
+    "flags/config.h"
+    "flags/declare.h"
+    "flags/flag.h"
+    "flags/internal/commandlineflag.cc"
+    "flags/internal/commandlineflag.h"
+    "flags/internal/flag.cc"
+    "flags/internal/flag.h"
+    "flags/internal/parse.h"
+    "flags/internal/path_util.h"
+    "flags/internal/private_handle_accessor.cc"
+    "flags/internal/private_handle_accessor.h"
+    "flags/internal/program_name.cc"
+    "flags/internal/program_name.h"
+    "flags/internal/registry.h"
+    "flags/internal/sequence_lock.h"
+    "flags/internal/usage.cc"
+    "flags/internal/usage.h"
+    "flags/marshalling.cc"
+    "flags/marshalling.h"
+    "flags/parse.cc"
+    "flags/parse.h"
+    "flags/reflection.cc"
+    "flags/reflection.h"
+    "flags/usage.cc"
+    "flags/usage.h"
+    "flags/usage_config.cc"
+    "flags/usage_config.h"
+    "log/flags.cc"
+    "log/flags.h"
+    "log/internal/flags.h"
+  )
+endif()
+
 set(ABSL_INTERNAL_DLL_TARGETS
   "absl_check"
   "absl_log"
+  "absl_vlog_is_on"
   "algorithm"
   "algorithm_container"
   "any"
@@ -505,6 +552,7 @@
   "log_internal_check_op"
   "log_internal_conditions"
   "log_internal_config"
+  "log_internal_fnmatch"
   "log_internal_format"
   "log_internal_globals"
   "log_internal_log_impl"
@@ -584,6 +632,7 @@
   "strerror"
   "strings"
   "strings_internal"
+  "string_view"
   "symbolize"
   "synchronization"
   "thread_pool"
@@ -594,8 +643,30 @@
   "type_traits"
   "utility"
   "variant"
+  "vlog_config_internal"
+  "vlog_is_on"
 )
 
+if(NOT MSVC)
+  list(APPEND ABSL_INTERNAL_DLL_TARGETS
+    "flags"
+    "flags_commandlineflag"
+    "flags_commandlineflag_internal"
+    "flags_config"
+    "flags_internal"
+    "flags_marshalling"
+    "flags_parse"
+    "flags_path_util"
+    "flags_private_handle_accessor"
+    "flags_program_name"
+    "flags_reflection"
+    "flags_usage"
+    "flags_usage_internal"
+    "log_internal_flags"
+    "log_flags"
+  )
+endif()
+
 set(ABSL_INTERNAL_TEST_DLL_FILES
   "hash/hash_testing.h"
   "log/scoped_mock_log.cc"
@@ -608,6 +679,9 @@
   "random/internal/mock_overload_set.h"
   "random/mocking_bit_gen.h"
   "random/mock_distributions.h"
+  "status/status_matchers.h"
+  "status/internal/status_matchers.cc"
+  "status/internal/status_matchers.h"
   "strings/cordz_test_helpers.h"
   "strings/cord_test_helpers.h"
 )
@@ -620,6 +694,7 @@
   "random_internal_distribution_test_util"
   "random_internal_mock_overload_set"
   "scoped_mock_log"
+  "status_matchers"
 )
 
 include(CheckCXXSourceCompiles)
@@ -668,12 +743,7 @@
 
   STRING(REGEX REPLACE "^absl::" "" _target ${ABSL_INTERNAL_DLL_TARGET})
 
-  list(FIND
-    ABSL_INTERNAL_DLL_TARGETS
-    "${_target}"
-    _index)
-
-  if (${_index} GREATER -1)
+  if (_target IN_LIST ABSL_INTERNAL_DLL_TARGETS)
     set(${ABSL_INTERNAL_DLL_OUTPUT} 1 PARENT_SCOPE)
   else()
     set(${ABSL_INTERNAL_DLL_OUTPUT} 0 PARENT_SCOPE)
@@ -690,12 +760,7 @@
 
   STRING(REGEX REPLACE "^absl::" "" _target ${ABSL_INTERNAL_TEST_DLL_TARGET})
 
-  list(FIND
-    ABSL_INTERNAL_TEST_DLL_TARGETS
-    "${_target}"
-    _index)
-
-  if (${_index} GREATER -1)
+  if (_target IN_LIST ABSL_INTERNAL_TEST_DLL_TARGETS)
     set(${ABSL_INTERNAL_TEST_DLL_OUTPUT} 1 PARENT_SCOPE)
   else()
     set(${ABSL_INTERNAL_TEST_DLL_OUTPUT} 0 PARENT_SCOPE)
@@ -747,7 +812,12 @@
   else()
     set(_dll "abseil_dll")
     set(_dll_files ${ABSL_INTERNAL_DLL_FILES})
-    set(_dll_libs "")
+    set(_dll_libs
+      Threads::Threads
+      # TODO(#1495): Use $<LINK_LIBRARY:FRAMEWORK,CoreFoundation> once our
+      # minimum CMake version >= 3.24
+      $<$<PLATFORM_ID:Darwin>:-Wl,-framework,CoreFoundation>
+    )
     set(_dll_compile_definitions "")
     set(_dll_includes "")
     set(_dll_consume "ABSL_CONSUME_DLL")
@@ -765,7 +835,10 @@
       ${_dll_libs}
       ${ABSL_DEFAULT_LINKOPTS}
   )
-  set_property(TARGET ${_dll} PROPERTY LINKER_LANGUAGE "CXX")
+  set_target_properties(${_dll} PROPERTIES
+    LINKER_LANGUAGE "CXX"
+    SOVERSION ${ABSL_SOVERSION}
+  )
   target_include_directories(
     ${_dll}
     PUBLIC
diff --git a/CMake/AbseilHelpers.cmake b/CMake/AbseilHelpers.cmake
index c53b358..b177e59 100644
--- a/CMake/AbseilHelpers.cmake
+++ b/CMake/AbseilHelpers.cmake
@@ -186,8 +186,16 @@
         endif()
       endif()
     endforeach()
+    set(skip_next_cflag OFF)
     foreach(cflag ${ABSL_CC_LIB_COPTS})
-      if(${cflag} MATCHES "^(-Wno|/wd)")
+      if(skip_next_cflag)
+        set(skip_next_cflag OFF)
+      elseif(${cflag} MATCHES "^-Xarch_")
+        # An -Xarch_ flag implies that its successor only applies to the
+        # specified platform. Filter both of them out before the successor
+        # reaches the "^-m" filter.
+        set(skip_next_cflag ON)
+      elseif(${cflag} MATCHES "^(-Wno|/wd)")
         # These flags are needed to suppress warnings that might fire in our headers.
         set(PC_CFLAGS "${PC_CFLAGS} ${cflag}")
       elseif(${cflag} MATCHES "^(-W|/w[1234eo])")
@@ -250,6 +258,13 @@
     elseif(_build_type STREQUAL "static" OR _build_type STREQUAL "shared")
       add_library(${_NAME} "")
       target_sources(${_NAME} PRIVATE ${ABSL_CC_LIB_SRCS} ${ABSL_CC_LIB_HDRS})
+      if(APPLE)
+        set_target_properties(${_NAME} PROPERTIES
+          INSTALL_RPATH "@loader_path")
+      elseif(UNIX)
+        set_target_properties(${_NAME} PROPERTIES
+          INSTALL_RPATH "$ORIGIN")
+      endif()
       target_link_libraries(${_NAME}
       PUBLIC ${ABSL_CC_LIB_DEPS}
       PRIVATE
@@ -298,7 +313,7 @@
     if(ABSL_ENABLE_INSTALL)
       set_target_properties(${_NAME} PROPERTIES
         OUTPUT_NAME "absl_${_NAME}"
-        SOVERSION "2401.0.0"
+        SOVERSION "${ABSL_SOVERSION}"
       )
     endif()
   else()
diff --git a/CMake/Googletest/CMakeLists.txt.in b/CMake/Googletest/CMakeLists.txt.in
index 75691b1..3db4834 100644
--- a/CMake/Googletest/CMakeLists.txt.in
+++ b/CMake/Googletest/CMakeLists.txt.in
@@ -1,4 +1,4 @@
-cmake_minimum_required(VERSION 3.10)
+cmake_minimum_required(VERSION 3.16)
 
 project(googletest-external NONE)
 
diff --git a/CMake/README.md b/CMake/README.md
index c7ddee6..808edfe 100644
--- a/CMake/README.md
+++ b/CMake/README.md
@@ -39,7 +39,7 @@
 Here is a short CMakeLists.txt example of an application project using Abseil.
 
 ```cmake
-cmake_minimum_required(VERSION 3.10)
+cmake_minimum_required(VERSION 3.16)
 project(my_app_project)
 
 # Pick the C++ standard to compile with.
@@ -62,7 +62,7 @@
 example:
 
 ```cmake
-cmake_minimum_required(VERSION 3.10)
+cmake_minimum_required(VERSION 3.16)
 project(my_lib_project)
 
 # Leave C++ standard up to the root application, so set it only if this is the
diff --git a/CMake/install_test_project/CMakeLists.txt b/CMake/install_test_project/CMakeLists.txt
index 30c23b2..3229887 100644
--- a/CMake/install_test_project/CMakeLists.txt
+++ b/CMake/install_test_project/CMakeLists.txt
@@ -15,7 +15,7 @@
 
 # A simple CMakeLists.txt for testing cmake installation
 
-cmake_minimum_required(VERSION 3.10)
+cmake_minimum_required(VERSION 3.16)
 project(absl_cmake_testing CXX)
 
 add_executable(simple simple.cc)
diff --git a/CMake/install_test_project/test.sh b/CMake/install_test_project/test.sh
index cc028ba..962bc8d 100755
--- a/CMake/install_test_project/test.sh
+++ b/CMake/install_test_project/test.sh
@@ -22,7 +22,8 @@
 absl_dir=/abseil-cpp
 absl_build_dir=/buildfs
 googletest_builddir=/googletest_builddir
-project_dir="${absl_dir}"/CMake/install_test_project
+googletest_archive="googletest-${ABSL_GOOGLETEST_VERSION}.tar.gz"
+project_dir="${absl_dir}/CMake/install_test_project"
 project_build_dir=/buildfs/project-build
 
 build_shared_libs="OFF"
@@ -33,9 +34,9 @@
 # Build and install GoogleTest
 mkdir "${googletest_builddir}"
 pushd "${googletest_builddir}"
-curl -L "${ABSL_GOOGLETEST_DOWNLOAD_URL}" --output "${ABSL_GOOGLETEST_COMMIT}".zip
-unzip "${ABSL_GOOGLETEST_COMMIT}".zip
-pushd "googletest-${ABSL_GOOGLETEST_COMMIT}"
+curl -L "${ABSL_GOOGLETEST_DOWNLOAD_URL}" --output "${googletest_archive}"
+tar -xz -f "${googletest_archive}"
+pushd "googletest-${ABSL_GOOGLETEST_VERSION}"
 mkdir build
 pushd build
 cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS="${build_shared_libs}" ..
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 194f870..7c82b3a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -15,50 +15,16 @@
 #
 
 # https://github.com/google/oss-policies-info/blob/main/foundational-cxx-support-matrix.md
-# As of 2022-09-06, CMake 3.10 is the minimum supported version.
-cmake_minimum_required(VERSION 3.10)
-
-# Compiler id for Apple Clang is now AppleClang.
-if (POLICY CMP0025)
-  cmake_policy(SET CMP0025 NEW)
-endif (POLICY CMP0025)
-
-# if command can use IN_LIST
-if (POLICY CMP0057)
-  cmake_policy(SET CMP0057 NEW)
-endif (POLICY CMP0057)
-
-# Project version variables are the empty string if version is unspecified
-if (POLICY CMP0048)
-  cmake_policy(SET CMP0048 NEW)
-endif (POLICY CMP0048)
-
-# Honor the GTest_ROOT variable if specified
-if (POLICY CMP0074)
-  cmake_policy(SET CMP0074 NEW)
-endif (POLICY CMP0074)
-
-# option() honor variables
-if (POLICY CMP0077)
-  cmake_policy(SET CMP0077 NEW)
-endif (POLICY CMP0077)
-
-# Allow the user to specify the MSVC runtime
-if (POLICY CMP0091)
-  cmake_policy(SET CMP0091 NEW)
-endif (POLICY CMP0091)
-
-# try_compile() honors the CMAKE_CXX_STANDARD value
-if (POLICY CMP0067)
-  cmake_policy(SET CMP0067 NEW)
-endif (POLICY CMP0067)
+# As of 2024-07-01, CMake 3.16 is the minimum supported version.
+cmake_minimum_required(VERSION 3.16)
 
 # Allow the user to specify the CMAKE_MSVC_DEBUG_INFORMATION_FORMAT
 if (POLICY CMP0141)
   cmake_policy(SET CMP0141 NEW)
 endif (POLICY CMP0141)
 
-project(absl LANGUAGES CXX VERSION 20240116)
+project(absl LANGUAGES CXX VERSION 20240722)
+set(ABSL_SOVERSION "2407.0.0")
 include(CTest)
 
 # Output directory is correct by default for most build setups. However, when
@@ -75,6 +41,10 @@
   option(ABSL_ENABLE_INSTALL "Enable install rule" ON)
 endif()
 
+set(CMAKE_INSTALL_RPATH "$ORIGIN")
+set(CMAKE_INSTALL_RPATH_USE_LINK_PATH ON)
+set(CMAKE_BUILD_RPATH_USE_ORIGIN ON)
+
 option(ABSL_PROPAGATE_CXX_STD
   "Use CMake C++ standard meta features (e.g. cxx_std_14) that propagate to targets that link to Abseil"
   OFF)  # TODO: Default to ON for CMake 3.8 and greater.
@@ -91,6 +61,15 @@
   ${CMAKE_CURRENT_LIST_DIR}/absl/copts
 )
 
+option(ABSL_MSVC_STATIC_RUNTIME
+  "Link static runtime libraries"
+  OFF)
+if(ABSL_MSVC_STATIC_RUNTIME)
+  set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$<CONFIG:Debug>:Debug>")
+else()
+  set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$<CONFIG:Debug>:Debug>DLL")
+endif()
+
 include(CMakePackageConfigHelpers)
 include(GNUInstallDirs)
 include(AbseilDll)
@@ -149,6 +128,14 @@
   "If ABSL_USE_GOOGLETEST_HEAD is OFF and ABSL_GOOGLETEST_URL is not set, specifies the directory of a local GoogleTest checkout."
   )
 
+option(ABSL_BUILD_MONOLITHIC_SHARED_LIBS
+  "Build Abseil as a single shared library (always enabled for Windows)"
+  OFF
+)
+if(NOT BUILD_SHARED_LIBS AND ABSL_BUILD_MONOLITHIC_SHARED_LIBS)
+  message(WARNING "Not building a shared library because BUILD_SHARED_LIBS is not set. Ignoring ABSL_BUILD_MONOLITHIC_SHARED_LIBS.")
+endif()
+
 if((BUILD_TESTING AND ABSL_BUILD_TESTING) OR ABSL_BUILD_TEST_HELPERS)
   if (ABSL_USE_EXTERNAL_GOOGLETEST)
     if (ABSL_FIND_GOOGLETEST)
@@ -261,7 +248,7 @@
     ABSL_INTERNAL_OPTIONS_H_PINNED
     "${ABSL_INTERNAL_OPTIONS_H_CONTENTS}")
 
-  file(WRITE "${CMAKE_BINARY_DIR}/options-pinned.h" "${ABSL_INTERNAL_OPTIONS_H_PINNED}")
+  file(GENERATE OUTPUT "${CMAKE_BINARY_DIR}/options-pinned.h" CONTENT "${ABSL_INTERNAL_OPTIONS_H_PINNED}")
 
   install(FILES "${CMAKE_BINARY_DIR}/options-pinned.h"
          DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/absl/base
diff --git a/METADATA b/METADATA
index 6c196d6..4f42d94 100644
--- a/METADATA
+++ b/METADATA
@@ -12,7 +12,7 @@
     type: GIT
     value: "https://github.com/abseil/abseil-cpp"
   }
-  version: "20240116.1"
-  last_upgrade_date { year: 2024 month: 2 day: 28 }
+  version: "20240722.0"
+  last_upgrade_date { year: 2024 month: 11 day: 4 }
   license_type: NOTICE
 }
diff --git a/MODULE.bazel b/MODULE.bazel
index efbc88b..75285b6 100644
--- a/MODULE.bazel
+++ b/MODULE.bazel
@@ -16,10 +16,13 @@
 
 module(
     name = "abseil-cpp",
-    version = "20240116.0",
+    version = "20240722.0",
     compatibility_level = 1,
 )
 
+cc_configure = use_extension("@bazel_tools//tools/cpp:cc_configure.bzl", "cc_configure_extension")
+use_repo(cc_configure, "local_config_cc")
+
 # Only direct dependencies need to be listed below.
 # Please keep the versions in sync with the versions in the WORKSPACE file.
 
@@ -32,8 +35,8 @@
           dev_dependency = True)
 
 bazel_dep(name = "googletest",
-          version = "1.14.0.bcr.1",
+          version = "1.15.2",
           repo_name = "com_google_googletest")
 
 bazel_dep(name = "platforms",
-          version = "0.0.8")
+          version = "0.0.10")
diff --git a/OWNERS b/OWNERS
index cd410bc..9d4dab0 100644
--- a/OWNERS
+++ b/OWNERS
@@ -1,2 +1,2 @@
[email protected]
[email protected]
\ No newline at end of file
+include platform/system/core:/janitors/OWNERS
[email protected]
diff --git a/PrivacyInfo.xcprivacy b/PrivacyInfo.xcprivacy
index 6af1641..3ff4a9d 100644
--- a/PrivacyInfo.xcprivacy
+++ b/PrivacyInfo.xcprivacy
@@ -2,13 +2,13 @@
 <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
 <plist version="1.0">
 <dict>
-	<key>NSPrivacyTracking</key>
-	<false/>
-	<key>NSPrivacyCollectedDataTypes</key>
-	<array/>
-	<key>NSPrivacyTrackingDomains</key>
-	<array/>
-	<key>NSPrivacyAccessedAPITypes</key>
-	<array/>
+  <key>NSPrivacyTracking</key>
+  <false/>
+  <key>NSPrivacyCollectedDataTypes</key>
+  <array/>
+  <key>NSPrivacyTrackingDomains</key>
+  <array/>
+  <key>NSPrivacyAccessedAPITypes</key>
+  <array/>
 </dict>
 </plist>
diff --git a/WORKSPACE b/WORKSPACE
index 0d88609..dee6d05 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -20,20 +20,21 @@
 
 # GoogleTest/GoogleMock framework. Used by most unit-tests.
 http_archive(
-  name = "com_google_googletest",
-  sha256 = "8ad598c73ad796e0d8280b082cebd82a630d73e73cd3c70057938a6501bba5d7",
-  strip_prefix = "googletest-1.14.0",
-  # Keep this URL in sync with ABSL_GOOGLETEST_COMMIT in ci/cmake_common.sh and
-  # ci/windows_msvc_cmake.bat.
-  urls = ["https://github.com/google/googletest/archive/refs/tags/v1.14.0.tar.gz"],
+    name = "com_google_googletest",
+    sha256 = "7b42b4d6ed48810c5362c265a17faebe90dc2373c885e5216439d37927f02926",
+    strip_prefix = "googletest-1.15.2",
+    # Keep this URL in sync with the version in ci/cmake_common.sh and
+    # ci/windows_msvc_cmake.bat.
+    urls = ["https://github.com/google/googletest/releases/download/v1.15.2/googletest-1.15.2.tar.gz"],
 )
 
 # RE2 (the regular expression library used by GoogleTest)
 http_archive(
     name = "com_googlesource_code_re2",
-    sha256 = "828341ad08524618a626167bd320b0c2acc97bd1c28eff693a9ea33a7ed2a85f",
-    strip_prefix = "re2-2023-11-01",
-    urls = ["https://github.com/google/re2/releases/download/2023-11-01/re2-2023-11-01.zip"],
+    sha256 = "eb2df807c781601c14a260a507a5bb4509be1ee626024cb45acbd57cb9d4032b",
+    strip_prefix = "re2-2024-07-02",
+    urls = ["https://github.com/google/re2/releases/download/2024-07-02/re2-2024-07-02.tar.gz"],
+    repo_mapping = {"@abseil-cpp": "@com_google_absl"},
 )
 
 # Google benchmark.
@@ -46,14 +47,17 @@
 
 # Bazel Skylib.
 http_archive(
-  name = "bazel_skylib",
-  sha256 = "cd55a062e763b9349921f0f5db8c3933288dc8ba4f76dd9416aac68acee3cb94",
-  urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/1.5.0/bazel-skylib-1.5.0.tar.gz"],
+    name = "bazel_skylib",
+    sha256 = "cd55a062e763b9349921f0f5db8c3933288dc8ba4f76dd9416aac68acee3cb94",
+    urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/1.5.0/bazel-skylib-1.5.0.tar.gz"],
 )
 
 # Bazel platform rules.
 http_archive(
     name = "platforms",
-    sha256 = "8150406605389ececb6da07cbcb509d5637a3ab9a24bc69b1101531367d89d74",
-    urls = ["https://github.com/bazelbuild/platforms/releases/download/0.0.8/platforms-0.0.8.tar.gz"],
+    urls = [
+        "https://mirror.bazel.build/github.com/bazelbuild/platforms/releases/download/0.0.10/platforms-0.0.10.tar.gz",
+        "https://github.com/bazelbuild/platforms/releases/download/0.0.10/platforms-0.0.10.tar.gz",
+    ],
+    sha256 = "218efe8ee736d26a3572663b374a253c012b716d8af0c07e842e82f238a0a7ee",
 )
diff --git a/WORKSPACE.bzlmod b/WORKSPACE.bzlmod
new file mode 100644
index 0000000..83e67ba
--- /dev/null
+++ b/WORKSPACE.bzlmod
@@ -0,0 +1,19 @@
+# Copyright 2024 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# https://bazel.build/external/migration#workspace.bzlmod
+#
+# This file is intentionally empty. When bzlmod is enabled and this
+# file exists, the contents of WORKSPACE are ignored. This prevents
+# bzlmod builds from unintentionally depending on the WORKSPACE file.
diff --git a/absl/BUILD.bazel b/absl/BUILD.bazel
index 14c30b3..253c0ae 100644
--- a/absl/BUILD.bazel
+++ b/absl/BUILD.bazel
@@ -68,17 +68,6 @@
     visibility = [":__subpackages__"],
 )
 
-# x64_windows-clang-cl - used for selecting clang-cl for CI builds
-platform(
-    name = "x64_windows-clang-cl",
-    constraint_values = [
-        "@platforms//cpu:x86_64",
-        "@platforms//os:windows",
-        "@bazel_tools//tools/cpp:clang-cl",
-    ],
-    visibility = [":__subpackages__"],
-)
-
 config_setting(
     name = "osx",
     constraint_values = [
diff --git a/absl/CMakeLists.txt b/absl/CMakeLists.txt
index 3a7c12f..810d7f3 100644
--- a/absl/CMakeLists.txt
+++ b/absl/CMakeLists.txt
@@ -36,9 +36,9 @@
 add_subdirectory(types)
 add_subdirectory(utility)
 
-if (${ABSL_BUILD_DLL})
+if (ABSL_BUILD_DLL)
   absl_make_dll()
-  if (${ABSL_BUILD_TEST_HELPERS})
+  if ((BUILD_TESTING AND ABSL_BUILD_TESTING) OR ABSL_BUILD_TEST_HELPERS)
     absl_make_dll(TEST ON)
   endif()
 endif()
diff --git a/absl/abseil.podspec.gen.py b/absl/abseil.podspec.gen.py
index c83edbf..cbf7cb4 100755
--- a/absl/abseil.podspec.gen.py
+++ b/absl/abseil.podspec.gen.py
@@ -44,9 +44,14 @@
     'ALWAYS_SEARCH_USER_PATHS' => 'NO',
   }
   s.ios.deployment_target = '9.0'
-  s.osx.deployment_target = '10.10'
+  s.osx.deployment_target = '10.11'
   s.tvos.deployment_target = '9.0'
   s.watchos.deployment_target = '2.0'
+  s.subspec 'xcprivacy' do |ss|
+    ss.resource_bundles = {
+      ss.module_name => 'PrivacyInfo.xcprivacy',
+    }
+  end
 """
 
 # Rule object representing the rule of Bazel BUILD.
@@ -191,6 +196,12 @@
     name = get_spec_name(dep.replace(":", "/"))
     f.write("{indent}{var}.dependency '{dep}'\n".format(
         indent=indent, var=spec_var, dep=name))
+  # Writes dependency to xcprivacy
+  f.write(
+      "{indent}{var}.dependency '{dep}'\n".format(
+          indent=indent, var=spec_var, dep="abseil/xcprivacy"
+      )
+  )
 
 
 def write_indented_list(f, leading, values):
diff --git a/absl/algorithm/BUILD.bazel b/absl/algorithm/BUILD.bazel
index ddf9e11..f20e729 100644
--- a/absl/algorithm/BUILD.bazel
+++ b/absl/algorithm/BUILD.bazel
@@ -65,6 +65,7 @@
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         ":algorithm",
+        "//absl/base:config",
         "//absl/base:core_headers",
         "//absl/base:nullability",
         "//absl/meta:type_traits",
@@ -79,6 +80,7 @@
     deps = [
         ":container",
         "//absl/base",
+        "//absl/base:config",
         "//absl/base:core_headers",
         "//absl/memory",
         "//absl/types:span",
diff --git a/absl/algorithm/CMakeLists.txt b/absl/algorithm/CMakeLists.txt
index 5577164..252b6b2 100644
--- a/absl/algorithm/CMakeLists.txt
+++ b/absl/algorithm/CMakeLists.txt
@@ -48,6 +48,7 @@
     ${ABSL_DEFAULT_COPTS}
   DEPS
     absl::algorithm
+    absl::config
     absl::core_headers
     absl::meta
     absl::nullability
@@ -64,6 +65,7 @@
   DEPS
     absl::algorithm_container
     absl::base
+    absl::config
     absl::core_headers
     absl::memory
     absl::span
diff --git a/absl/algorithm/container.h b/absl/algorithm/container.h
index c7bafae..6bbe3b5 100644
--- a/absl/algorithm/container.h
+++ b/absl/algorithm/container.h
@@ -44,6 +44,7 @@
 #include <cassert>
 #include <iterator>
 #include <numeric>
+#include <random>
 #include <type_traits>
 #include <unordered_map>
 #include <unordered_set>
@@ -51,6 +52,7 @@
 #include <vector>
 
 #include "absl/algorithm/algorithm.h"
+#include "absl/base/config.h"
 #include "absl/base/macros.h"
 #include "absl/base/nullability.h"
 #include "absl/meta/type_traits.h"
@@ -92,17 +94,17 @@
 //   using std::end;
 //   std::foo(begin(c), end(c));
 // becomes
-//   std::foo(container_algorithm_internal::begin(c),
-//            container_algorithm_internal::end(c));
+//   std::foo(container_algorithm_internal::c_begin(c),
+//            container_algorithm_internal::c_end(c));
 // These are meant for internal use only.
 
 template <typename C>
-ContainerIter<C> c_begin(C& c) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17 ContainerIter<C> c_begin(C& c) {
   return begin(c);
 }
 
 template <typename C>
-ContainerIter<C> c_end(C& c) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17 ContainerIter<C> c_end(C& c) {
   return end(c);
 }
 
@@ -145,8 +147,9 @@
 // Container-based version of the <iterator> `std::distance()` function to
 // return the number of elements within a container.
 template <typename C>
-container_algorithm_internal::ContainerDifferenceType<const C> c_distance(
-    const C& c) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
+    container_algorithm_internal::ContainerDifferenceType<const C>
+    c_distance(const C& c) {
   return std::distance(container_algorithm_internal::c_begin(c),
                        container_algorithm_internal::c_end(c));
 }
@@ -210,6 +213,16 @@
                    std::forward<T>(value));
 }
 
+// c_contains()
+//
+// Container-based version of the <algorithm> `std::ranges::contains()` C++23
+// function to search a container for a value.
+template <typename Sequence, typename T>
+bool c_contains(const Sequence& sequence, T&& value) {
+  return absl::c_find(sequence, std::forward<T>(value)) !=
+         container_algorithm_internal::c_end(sequence);
+}
+
 // c_find_if()
 //
 // Container-based version of the <algorithm> `std::find_if()` function to find
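
A brief usage sketch for the new `absl::c_contains()` added in this hunk; the helper function name is illustrative, not from the patch.

```cpp
#include <vector>

#include "absl/algorithm/container.h"

bool HasSeven(const std::vector<int>& v) {
  // Same result as std::find(v.begin(), v.end(), 7) != v.end().
  return absl::c_contains(v, 7);
}
```
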
@@ -426,6 +439,26 @@
                      std::forward<BinaryPredicate>(pred));
 }
 
+// c_contains_subrange()
+//
+// Container-based version of the <algorithm> `std::ranges::contains_subrange()`
+// C++23 function to search a container for a subsequence.
+template <typename Sequence1, typename Sequence2>
+bool c_contains_subrange(Sequence1& sequence, Sequence2& subsequence) {
+  return absl::c_search(sequence, subsequence) !=
+         container_algorithm_internal::c_end(sequence);
+}
+
+// Overload of c_contains_subrange() for using a predicate evaluation other than
+// `==` as the function's test condition.
+template <typename Sequence1, typename Sequence2, typename BinaryPredicate>
+bool c_contains_subrange(Sequence1& sequence, Sequence2& subsequence,
+                         BinaryPredicate&& pred) {
+  return absl::c_search(sequence, subsequence,
+                        std::forward<BinaryPredicate>(pred)) !=
+         container_algorithm_internal::c_end(sequence);
+}
+
 // c_search_n()
 //
 // Container-based version of the <algorithm> `std::search_n()` function to
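
A usage sketch for the new `absl::c_contains_subrange()` overloads above; the helper names and the case-insensitive predicate are illustrative only.

```cpp
#include <string>
#include <vector>

#include "absl/algorithm/container.h"
#include "absl/strings/ascii.h"

// Exact subsequence match using operator==.
bool HasWindow(const std::vector<int>& haystack, const std::vector<int>& needle) {
  return absl::c_contains_subrange(haystack, needle);
}

// Predicate overload: case-insensitive substring check on std::string.
bool ContainsWordIgnoreCase(std::string& text, std::string& word) {
  return absl::c_contains_subrange(text, word, [](char a, char b) {
    return absl::ascii_tolower(a) == absl::ascii_tolower(b);
  });
}
```
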
@@ -1500,8 +1533,9 @@
 // to return an iterator pointing to the element with the smallest value, using
 // `operator<` to make the comparisons.
 template <typename Sequence>
-container_algorithm_internal::ContainerIter<Sequence> c_min_element(
-    Sequence& sequence) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
+    container_algorithm_internal::ContainerIter<Sequence>
+    c_min_element(Sequence& sequence) {
   return std::min_element(container_algorithm_internal::c_begin(sequence),
                           container_algorithm_internal::c_end(sequence));
 }
@@ -1509,8 +1543,9 @@
 // Overload of c_min_element() for performing a `comp` comparison other than
 // `operator<`.
 template <typename Sequence, typename LessThan>
-container_algorithm_internal::ContainerIter<Sequence> c_min_element(
-    Sequence& sequence, LessThan&& comp) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
+    container_algorithm_internal::ContainerIter<Sequence>
+    c_min_element(Sequence& sequence, LessThan&& comp) {
   return std::min_element(container_algorithm_internal::c_begin(sequence),
                           container_algorithm_internal::c_end(sequence),
                           std::forward<LessThan>(comp));
@@ -1522,8 +1557,9 @@
 // to return an iterator pointing to the element with the largest value, using
 // `operator<` to make the comparisons.
 template <typename Sequence>
-container_algorithm_internal::ContainerIter<Sequence> c_max_element(
-    Sequence& sequence) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
+    container_algorithm_internal::ContainerIter<Sequence>
+    c_max_element(Sequence& sequence) {
   return std::max_element(container_algorithm_internal::c_begin(sequence),
                           container_algorithm_internal::c_end(sequence));
 }
@@ -1531,8 +1567,9 @@
 // Overload of c_max_element() for performing a `comp` comparison other than
 // `operator<`.
 template <typename Sequence, typename LessThan>
-container_algorithm_internal::ContainerIter<Sequence> c_max_element(
-    Sequence& sequence, LessThan&& comp) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
+    container_algorithm_internal::ContainerIter<Sequence>
+    c_max_element(Sequence& sequence, LessThan&& comp) {
   return std::max_element(container_algorithm_internal::c_begin(sequence),
                           container_algorithm_internal::c_end(sequence),
                           std::forward<LessThan>(comp));
@@ -1545,8 +1582,9 @@
 // smallest and largest values, respectively, using `operator<` to make the
 // comparisons.
 template <typename C>
-container_algorithm_internal::ContainerIterPairType<C, C> c_minmax_element(
-    C& c) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
+    container_algorithm_internal::ContainerIterPairType<C, C>
+    c_minmax_element(C& c) {
   return std::minmax_element(container_algorithm_internal::c_begin(c),
                              container_algorithm_internal::c_end(c));
 }
@@ -1554,8 +1592,9 @@
 // Overload of c_minmax_element() for performing `comp` comparisons other than
 // `operator<`.
 template <typename C, typename LessThan>
-container_algorithm_internal::ContainerIterPairType<C, C> c_minmax_element(
-    C& c, LessThan&& comp) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
+    container_algorithm_internal::ContainerIterPairType<C, C>
+    c_minmax_element(C& c, LessThan&& comp) {
   return std::minmax_element(container_algorithm_internal::c_begin(c),
                              container_algorithm_internal::c_end(c),
                              std::forward<LessThan>(comp));
diff --git a/absl/algorithm/container_test.cc b/absl/algorithm/container_test.cc
index c01f5fc..5012224 100644
--- a/absl/algorithm/container_test.cc
+++ b/absl/algorithm/container_test.cc
@@ -15,6 +15,7 @@
 #include "absl/algorithm/container.h"
 
 #include <algorithm>
+#include <array>
 #include <functional>
 #include <initializer_list>
 #include <iterator>
@@ -31,6 +32,7 @@
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
 #include "absl/base/casts.h"
+#include "absl/base/config.h"
 #include "absl/base/macros.h"
 #include "absl/memory/memory.h"
 #include "absl/types/span.h"
@@ -113,6 +115,11 @@
   absl::c_find(absl::implicit_cast<const std::list<int>&>(sequence_), 3);
 }
 
+TEST_F(NonMutatingTest, Contains) {
+  EXPECT_TRUE(absl::c_contains(container_, 3));
+  EXPECT_FALSE(absl::c_contains(container_, 4));
+}
+
 TEST_F(NonMutatingTest, FindIf) { absl::c_find_if(container_, Predicate); }
 
 TEST_F(NonMutatingTest, FindIfNot) {
@@ -305,6 +312,17 @@
   absl::c_search(vector_, sequence_, BinPredicate);
 }
 
+TEST_F(NonMutatingTest, ContainsSubrange) {
+  EXPECT_TRUE(absl::c_contains_subrange(sequence_, vector_));
+  EXPECT_TRUE(absl::c_contains_subrange(vector_, sequence_));
+  EXPECT_TRUE(absl::c_contains_subrange(array_, sequence_));
+}
+
+TEST_F(NonMutatingTest, ContainsSubrangeWithPredicate) {
+  EXPECT_TRUE(absl::c_contains_subrange(sequence_, vector_, Equals));
+  EXPECT_TRUE(absl::c_contains_subrange(vector_, sequence_, Equals));
+}
+
 TEST_F(NonMutatingTest, SearchN) { absl::c_search_n(sequence_, 3, 1); }
 
 TEST_F(NonMutatingTest, SearchNWithPredicate) {
@@ -1144,4 +1162,49 @@
   EXPECT_EQ(initial, permuted);
 }
 
+#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
+    ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+TEST(ConstexprTest, Distance) {
+  // Works at compile time with constexpr containers.
+  static_assert(absl::c_distance(std::array<int, 3>()) == 3);
+}
+
+TEST(ConstexprTest, MinElement) {
+  constexpr std::array<int, 3> kArray = {1, 2, 3};
+  static_assert(*absl::c_min_element(kArray) == 1);
+}
+
+TEST(ConstexprTest, MinElementWithPredicate) {
+  constexpr std::array<int, 3> kArray = {1, 2, 3};
+  static_assert(*absl::c_min_element(kArray, std::greater<int>()) == 3);
+}
+
+TEST(ConstexprTest, MaxElement) {
+  constexpr std::array<int, 3> kArray = {1, 2, 3};
+  static_assert(*absl::c_max_element(kArray) == 3);
+}
+
+TEST(ConstexprTest, MaxElementWithPredicate) {
+  constexpr std::array<int, 3> kArray = {1, 2, 3};
+  static_assert(*absl::c_max_element(kArray, std::greater<int>()) == 1);
+}
+
+TEST(ConstexprTest, MinMaxElement) {
+  static constexpr std::array<int, 3> kArray = {1, 2, 3};
+  constexpr auto kMinMaxPair = absl::c_minmax_element(kArray);
+  static_assert(*kMinMaxPair.first == 1);
+  static_assert(*kMinMaxPair.second == 3);
+}
+
+TEST(ConstexprTest, MinMaxElementWithPredicate) {
+  static constexpr std::array<int, 3> kArray = {1, 2, 3};
+  constexpr auto kMinMaxPair =
+      absl::c_minmax_element(kArray, std::greater<int>());
+  static_assert(*kMinMaxPair.first == 3);
+  static_assert(*kMinMaxPair.second == 1);
+}
+
+#endif  // defined(ABSL_INTERNAL_CPLUSPLUS_LANG) &&
+        //  ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+
 }  // namespace
diff --git a/absl/base/BUILD.bazel b/absl/base/BUILD.bazel
index 0eb735d..96503c9 100644
--- a/absl/base/BUILD.bazel
+++ b/absl/base/BUILD.bazel
@@ -74,7 +74,10 @@
     hdrs = ["no_destructor.h"],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    deps = [":config"],
+    deps = [
+        ":config",
+        ":nullability",
+    ],
 )
 
 cc_library(
@@ -84,6 +87,7 @@
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
+        ":config",
         ":core_headers",
         "//absl/meta:type_traits",
     ],
@@ -294,7 +298,7 @@
 
 cc_library(
     name = "atomic_hook_test_helper",
-    testonly = 1,
+    testonly = True,
     srcs = ["internal/atomic_hook_test_helper.cc"],
     hdrs = ["internal/atomic_hook_test_helper.h"],
     copts = ABSL_DEFAULT_COPTS,
@@ -336,6 +340,18 @@
     ],
 )
 
+cc_test(
+    name = "c_header_test",
+    srcs = ["c_header_test.c"],
+    tags = [
+        "no_test_wasm",
+    ],
+    deps = [
+        ":config",
+        ":core_headers",
+    ],
+)
+
 cc_library(
     name = "throw_delegate",
     srcs = ["internal/throw_delegate.cc"],
@@ -380,7 +396,7 @@
 
 cc_library(
     name = "exception_testing",
-    testonly = 1,
+    testonly = True,
     hdrs = ["internal/exception_testing.h"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -404,7 +420,7 @@
 
 cc_library(
     name = "exception_safety_testing",
-    testonly = 1,
+    testonly = True,
     srcs = ["internal/exception_safety_testing.cc"],
     hdrs = ["internal/exception_safety_testing.h"],
     copts = ABSL_TEST_COPTS,
@@ -470,7 +486,7 @@
 # AbslInternalSpinLockDelay and AbslInternalSpinLockWake.
 cc_library(
     name = "spinlock_test_common",
-    testonly = 1,
+    testonly = True,
     srcs = ["spinlock_test_common.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -507,7 +523,7 @@
 
 cc_library(
     name = "spinlock_benchmark_common",
-    testonly = 1,
+    testonly = True,
     srcs = ["internal/spinlock_benchmark.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -527,7 +543,7 @@
 
 cc_binary(
     name = "spinlock_benchmark",
-    testonly = 1,
+    testonly = True,
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
     tags = ["benchmark"],
@@ -608,7 +624,7 @@
 
 cc_binary(
     name = "no_destructor_benchmark",
-    testonly = 1,
+    testonly = True,
     srcs = ["no_destructor_benchmark.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -710,7 +726,7 @@
 
 cc_library(
     name = "scoped_set_env",
-    testonly = 1,
+    testonly = True,
     srcs = ["internal/scoped_set_env.cc"],
     hdrs = ["internal/scoped_set_env.h"],
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -784,7 +800,7 @@
 
 cc_binary(
     name = "strerror_benchmark",
-    testonly = 1,
+    testonly = True,
     srcs = ["internal/strerror_benchmark.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -850,6 +866,41 @@
     ],
 )
 
+cc_library(
+    name = "poison",
+    srcs = [
+        "internal/poison.cc",
+    ],
+    hdrs = ["internal/poison.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//absl:__subpackages__",
+    ],
+    deps = [
+        ":config",
+        ":core_headers",
+        ":malloc_internal",
+    ],
+)
+
+cc_test(
+    name = "poison_test",
+    size = "small",
+    timeout = "short",
+    srcs = [
+        "internal/poison_test.cc",
+    ],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":config",
+        ":poison",
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
 cc_test(
     name = "unique_small_name_test",
     size = "small",
diff --git a/absl/base/CMakeLists.txt b/absl/base/CMakeLists.txt
index 4cfc228..97994fc 100644
--- a/absl/base/CMakeLists.txt
+++ b/absl/base/CMakeLists.txt
@@ -62,6 +62,7 @@
     "no_destructor.h"
   DEPS
     absl::config
+    absl::nullability
   COPTS
     ${ABSL_DEFAULT_COPTS}
 )
@@ -74,6 +75,7 @@
   SRCS
     "internal/nullability_impl.h"
   DEPS
+    absl::config
     absl::core_headers
     absl::type_traits
   COPTS
@@ -737,3 +739,33 @@
     absl::optional
     GTest::gtest_main
 )
+
+absl_cc_library(
+  NAME
+    poison
+  SRCS
+    "internal/poison.cc"
+  HDRS
+    "internal/poison.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::config
+    absl::core_headers
+    absl::malloc_internal
+)
+
+absl_cc_test(
+  NAME
+    poison_test
+  SRCS
+    "internal/poison_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::config
+    absl::poison
+    GTest::gtest_main
+)
diff --git a/absl/base/attributes.h b/absl/base/attributes.h
index d4f67a1..5ea5ee3 100644
--- a/absl/base/attributes.h
+++ b/absl/base/attributes.h
@@ -195,6 +195,9 @@
 // ABSL_ATTRIBUTE_NORETURN
 //
 // Tells the compiler that a given function never returns.
+//
+// Deprecated: Prefer the `[[noreturn]]` attribute standardized by C++11 over
+// this macro.
 #if ABSL_HAVE_ATTRIBUTE(noreturn) || (defined(__GNUC__) && !defined(__clang__))
 #define ABSL_ATTRIBUTE_NORETURN __attribute__((noreturn))
 #elif defined(_MSC_VER)
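
A one-line sketch of the replacement the new deprecation note recommends; `FatalError` is a hypothetical function, not part of the patch.

```cpp
// Prefer the C++11 standard attribute over ABSL_ATTRIBUTE_NORETURN.
[[noreturn]] void FatalError(const char* msg);
```
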
@@ -702,6 +705,11 @@
   _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
 #define ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING \
   _Pragma("GCC diagnostic pop")
+#elif defined(_MSC_VER)
+#define ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING \
+  _Pragma("warning(push)") _Pragma("warning(disable: 4996)")
+#define ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING \
+  _Pragma("warning(pop)")
 #else
 #define ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
 #define ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
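
A minimal sketch of how these paired macros are used, assuming the MSVC branch added above: they silence `-Wdeprecated-declarations` (GCC/Clang) or C4996 (MSVC) only around a deliberate call. The macros are Abseil-internal, and `OldApi()`/`CallOldApiOnPurpose()` are hypothetical names.

```cpp
#include "absl/base/attributes.h"

ABSL_DEPRECATED("Use NewApi() instead.")
int OldApi();

int CallOldApiOnPurpose() {
  // Suppress the deprecation warning for this one intentional call only.
  ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
  const int v = OldApi();
  ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
  return v;
}
```
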
@@ -808,14 +816,43 @@
 //
 // See also the upstream documentation:
 // https://clang.llvm.org/docs/AttributeReference.html#lifetimebound
+// https://learn.microsoft.com/en-us/cpp/code-quality/c26816?view=msvc-170
 #if ABSL_HAVE_CPP_ATTRIBUTE(clang::lifetimebound)
 #define ABSL_ATTRIBUTE_LIFETIME_BOUND [[clang::lifetimebound]]
+#elif ABSL_HAVE_CPP_ATTRIBUTE(msvc::lifetimebound)
+#define ABSL_ATTRIBUTE_LIFETIME_BOUND [[msvc::lifetimebound]]
 #elif ABSL_HAVE_ATTRIBUTE(lifetimebound)
 #define ABSL_ATTRIBUTE_LIFETIME_BOUND __attribute__((lifetimebound))
 #else
 #define ABSL_ATTRIBUTE_LIFETIME_BOUND
 #endif
 
+// ABSL_INTERNAL_ATTRIBUTE_VIEW indicates that a type acts like a view i.e. a
+// raw (non-owning) pointer. This enables diagnoses similar to those enabled by
+// ABSL_ATTRIBUTE_LIFETIME_BOUND.
+//
+// See the following links for details:
+// https://reviews.llvm.org/D64448
+// https://lists.llvm.org/pipermail/cfe-dev/2018-November/060355.html
+#if ABSL_HAVE_CPP_ATTRIBUTE(gsl::Pointer)
+#define ABSL_INTERNAL_ATTRIBUTE_VIEW [[gsl::Pointer]]
+#else
+#define ABSL_INTERNAL_ATTRIBUTE_VIEW
+#endif
+
+// ABSL_INTERNAL_ATTRIBUTE_OWNER indicates that a type acts like a smart
+// (owning) pointer. This enables diagnoses similar to those enabled by
+// ABSL_ATTRIBUTE_LIFETIME_BOUND.
+//
+// See the following links for details:
+// https://reviews.llvm.org/D64448
+// https://lists.llvm.org/pipermail/cfe-dev/2018-November/060355.html
+#if ABSL_HAVE_CPP_ATTRIBUTE(gsl::Owner)
+#define ABSL_INTERNAL_ATTRIBUTE_OWNER [[gsl::Owner]]
+#else
+#define ABSL_INTERNAL_ATTRIBUTE_OWNER
+#endif
+
 // ABSL_ATTRIBUTE_TRIVIAL_ABI
 // Indicates that a type is "trivially relocatable" -- meaning it can be
 // relocated without invoking the constructor/destructor, using a form of move
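
A sketch of the mechanism the new view/owner attributes enable, under the assumption of a toolchain with Clang's `-Wdangling-gsl` analysis. The `CharBuffer`/`CharView` types are illustrative only, and the `ABSL_INTERNAL_*` macros are meant for use inside Abseil itself.

```cpp
#include "absl/base/attributes.h"

// An owning type; the annotation tells the analysis it owns the storage.
class ABSL_INTERNAL_ATTRIBUTE_OWNER CharBuffer {
 public:
  // Lifetime-bound: the returned pointer must not outlive *this.
  const char* data() const ABSL_ATTRIBUTE_LIFETIME_BOUND { return data_; }

 private:
  char data_[16] = "hello";
};

// A non-owning view over a CharBuffer's storage.
class ABSL_INTERNAL_ATTRIBUTE_VIEW CharView {
 public:
  explicit CharView(const char* p) : p_(p) {}
  const char* get() const { return p_; }

 private:
  const char* p_;
};

const char* Dangling() {
  // The view is built from a temporary owner; with these annotations the
  // compiler can warn that the returned pointer dangles.
  CharView v(CharBuffer().data());
  return v.get();
}
```
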
@@ -871,4 +908,51 @@
 #define ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS
 #endif
 
+// ABSL_ATTRIBUTE_UNINITIALIZED
+//
+// GCC and Clang support a flag `-ftrivial-auto-var-init=<option>` (<option>
+// can be "zero" or "pattern") that can be used to initialize automatic stack
+// variables. Variables with this attribute will be left uninitialized,
+// overriding the compiler flag.
+//
+// See https://clang.llvm.org/docs/AttributeReference.html#uninitialized
+// and https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-uninitialized-variable-attribute
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::uninitialized)
+#define ABSL_ATTRIBUTE_UNINITIALIZED [[clang::uninitialized]]
+#elif ABSL_HAVE_CPP_ATTRIBUTE(gnu::uninitialized)
+#define ABSL_ATTRIBUTE_UNINITIALIZED [[gnu::uninitialized]]
+#elif ABSL_HAVE_ATTRIBUTE(uninitialized)
+#define ABSL_ATTRIBUTE_UNINITIALIZED __attribute__((uninitialized))
+#else
+#define ABSL_ATTRIBUTE_UNINITIALIZED
+#endif
+
+// ABSL_ATTRIBUTE_WARN_UNUSED
+//
+// Compilers routinely warn about trivial variables that are unused.  For
+// non-trivial types, this warning is suppressed since the
+// constructor/destructor may be intentional and load-bearing, for example, with
+// a RAII scoped lock.
+//
+// For example:
+//
+// class ABSL_ATTRIBUTE_WARN_UNUSED MyType {
+//  public:
+//   MyType();
+//   ~MyType();
+// };
+//
+// void foo() {
+//   // Warns with ABSL_ATTRIBUTE_WARN_UNUSED attribute present.
+//   MyType unused;
+// }
+//
+// See https://clang.llvm.org/docs/AttributeReference.html#warn-unused and
+// https://gcc.gnu.org/onlinedocs/gcc/C_002b_002b-Attributes.html#index-warn_005funused-type-attribute
+#if ABSL_HAVE_CPP_ATTRIBUTE(gnu::warn_unused)
+#define ABSL_ATTRIBUTE_WARN_UNUSED [[gnu::warn_unused]]
+#else
+#define ABSL_ATTRIBUTE_WARN_UNUSED
+#endif
+
 #endif  // ABSL_BASE_ATTRIBUTES_H_
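
A usage sketch for the two attributes added in this hunk; `FillBuffer`, `ReadIntoBuffer`, and `Timer` are illustrative names, not part of the patch.

```cpp
#include <cstddef>

#include "absl/base/attributes.h"

void FillBuffer(char* buf, std::size_t size);

void ReadIntoBuffer() {
  // Opt this buffer out of -ftrivial-auto-var-init: it is fully overwritten
  // before it is read, so zero/pattern initialization would be wasted work.
  ABSL_ATTRIBUTE_UNINITIALIZED char buf[4096];
  FillBuffer(buf, sizeof(buf));
}

// Non-trivial types normally suppress "unused variable" warnings; annotating
// the type opts back in, so an unused `Timer t;` is diagnosed.
class ABSL_ATTRIBUTE_WARN_UNUSED Timer {
 public:
  Timer();
  ~Timer();
};
```
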
diff --git a/absl/base/c_header_test.c b/absl/base/c_header_test.c
new file mode 100644
index 0000000..3cd0177
--- /dev/null
+++ b/absl/base/c_header_test.c
@@ -0,0 +1,30 @@
+// Copyright 2024 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifdef __cplusplus
+#error This is a C compile test
+#endif
+
+// This test ensures that headers that are included in legacy C code are
+// compatible with C. Abseil is a C++ library. We do not desire to expand C
+// compatibility or keep C compatibility forever. This test only exists to
+// ensure C compatibility until it is no longer required. Do not add new code
+// that requires C compatibility.
+#include "absl/base/attributes.h"     // IWYU pragma: keep
+#include "absl/base/config.h"         // IWYU pragma: keep
+#include "absl/base/optimization.h"   // IWYU pragma: keep
+#include "absl/base/policy_checks.h"  // IWYU pragma: keep
+#include "absl/base/port.h"           // IWYU pragma: keep
+
+int main() { return 0; }
diff --git a/absl/base/config.h b/absl/base/config.h
index c9165ac..0b22167 100644
--- a/absl/base/config.h
+++ b/absl/base/config.h
@@ -117,8 +117,8 @@
 //
 // LTS releases can be obtained from
 // https://github.com/abseil/abseil-cpp/releases.
-#define ABSL_LTS_RELEASE_VERSION 20240116
-#define ABSL_LTS_RELEASE_PATCH_LEVEL 1
+#define ABSL_LTS_RELEASE_VERSION 20240722
+#define ABSL_LTS_RELEASE_PATCH_LEVEL 0
 
 // Helper macro to convert a CPP variable to a string literal.
 #define ABSL_INTERNAL_DO_TOKEN_STR(x) #x
@@ -231,12 +231,11 @@
 #endif
 
 // ABSL_HAVE_TLS is defined to 1 when __thread should be supported.
-// We assume __thread is supported on Linux or Asylo when compiled with Clang or
+// We assume __thread is supported on Linux when compiled with Clang or
 // compiled against libstdc++ with _GLIBCXX_HAVE_TLS defined.
 #ifdef ABSL_HAVE_TLS
 #error ABSL_HAVE_TLS cannot be directly set
-#elif (defined(__linux__) || defined(__ASYLO__)) && \
-    (defined(__clang__) || defined(_GLIBCXX_HAVE_TLS))
+#elif (defined(__linux__)) && (defined(__clang__) || defined(_GLIBCXX_HAVE_TLS))
 #define ABSL_HAVE_TLS 1
 #endif
 
@@ -275,52 +274,17 @@
 #define ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE 1
 #endif
 
+
 // ABSL_HAVE_THREAD_LOCAL
 //
+// DEPRECATED - `thread_local` is available on all supported platforms.
 // Checks whether C++11's `thread_local` storage duration specifier is
 // supported.
 #ifdef ABSL_HAVE_THREAD_LOCAL
 #error ABSL_HAVE_THREAD_LOCAL cannot be directly set
-#elif defined(__APPLE__)
-// Notes:
-// * Xcode's clang did not support `thread_local` until version 8, and
-//   even then not for all iOS < 9.0.
-// * Xcode 9.3 started disallowing `thread_local` for 32-bit iOS simulator
-//   targeting iOS 9.x.
-// * Xcode 10 moves the deployment target check for iOS < 9.0 to link time
-//   making ABSL_HAVE_FEATURE unreliable there.
-//
-#if ABSL_HAVE_FEATURE(cxx_thread_local) && \
-    !(TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0)
+#else
 #define ABSL_HAVE_THREAD_LOCAL 1
 #endif
-#else  // !defined(__APPLE__)
-#define ABSL_HAVE_THREAD_LOCAL 1
-#endif
-
-// There are platforms for which TLS should not be used even though the compiler
-// makes it seem like it's supported (Android NDK < r12b for example).
-// This is primarily because of linker problems and toolchain misconfiguration:
-// Abseil does not intend to support this indefinitely. Currently, the newest
-// toolchain that we intend to support that requires this behavior is the
-// r11 NDK - allowing for a 5 year support window on that means this option
-// is likely to be removed around June of 2021.
-// TLS isn't supported until NDK r12b per
-// https://developer.android.com/ndk/downloads/revision_history.html
-// Since NDK r16, `__NDK_MAJOR__` and `__NDK_MINOR__` are defined in
-// <android/ndk-version.h>. For NDK < r16, users should define these macros,
-// e.g. `-D__NDK_MAJOR__=11 -D__NKD_MINOR__=0` for NDK r11.
-#if defined(__ANDROID__) && defined(__clang__)
-#if __has_include(<android/ndk-version.h>)
-#include <android/ndk-version.h>
-#endif  // __has_include(<android/ndk-version.h>)
-#if defined(__ANDROID__) && defined(__clang__) && defined(__NDK_MAJOR__) && \
-    defined(__NDK_MINOR__) &&                                               \
-    ((__NDK_MAJOR__ < 12) || ((__NDK_MAJOR__ == 12) && (__NDK_MINOR__ < 1)))
-#undef ABSL_HAVE_TLS
-#undef ABSL_HAVE_THREAD_LOCAL
-#endif
-#endif  // defined(__ANDROID__) && defined(__clang__)
 
 // ABSL_HAVE_INTRINSIC_INT128
 //
@@ -379,9 +343,7 @@
 #define ABSL_HAVE_EXCEPTIONS 1
 #endif  // defined(__EXCEPTIONS) && ABSL_HAVE_FEATURE(cxx_exceptions)
 // Handle remaining special cases and default to exceptions being supported.
-#elif !(defined(__GNUC__) && (__GNUC__ < 5) && !defined(__EXCEPTIONS)) && \
-    !(ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(5, 0) &&                        \
-      !defined(__cpp_exceptions)) &&                                      \
+#elif !(defined(__GNUC__) && !defined(__cpp_exceptions)) && \
     !(defined(_MSC_VER) && !defined(_CPPUNWIND))
 #define ABSL_HAVE_EXCEPTIONS 1
 #endif
@@ -416,9 +378,9 @@
 #elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) ||    \
     defined(_AIX) || defined(__ros__) || defined(__native_client__) ||       \
     defined(__asmjs__) || defined(__EMSCRIPTEN__) || defined(__Fuchsia__) || \
-    defined(__sun) || defined(__ASYLO__) || defined(__myriad2__) ||          \
-    defined(__HAIKU__) || defined(__OpenBSD__) || defined(__NetBSD__) ||     \
-    defined(__QNX__) || defined(__VXWORKS__) || defined(__hexagon__)
+    defined(__sun) || defined(__myriad2__) || defined(__HAIKU__) ||          \
+    defined(__OpenBSD__) || defined(__NetBSD__) || defined(__QNX__) ||       \
+    defined(__VXWORKS__) || defined(__hexagon__)
 #define ABSL_HAVE_MMAP 1
 #endif
 
@@ -902,9 +864,7 @@
 #error ABSL_INTERNAL_HAS_CXA_DEMANGLE cannot be directly set
 #elif defined(OS_ANDROID) && (defined(__i386__) || defined(__x86_64__))
 #define ABSL_INTERNAL_HAS_CXA_DEMANGLE 0
-#elif defined(__GNUC__) && defined(__GNUC_MINOR__) &&            \
-    (__GNUC__ >= 4 || (__GNUC__ >= 3 && __GNUC_MINOR__ >= 4)) && \
-    !defined(__mips__)
+#elif defined(__GNUC__)
 #define ABSL_INTERNAL_HAS_CXA_DEMANGLE 1
 #elif defined(__clang__) && !defined(_MSC_VER)
 #define ABSL_INTERNAL_HAS_CXA_DEMANGLE 1
@@ -981,6 +941,27 @@
 #define ABSL_HAVE_CONSTANT_EVALUATED 1
 #endif
 
+// ABSL_INTERNAL_CONSTEXPR_SINCE_CXXYY is used to conditionally define constexpr
+// for different C++ versions.
+//
+// These macros are an implementation detail and will be unconditionally removed
+// once the minimum supported C++ version catches up to a given version.
+//
+// For this reason, this symbol is considered INTERNAL and code outside of
+// Abseil must not use it.
+#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
+    ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+#define ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17 constexpr
+#else
+#define ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
+#endif
+#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
+    ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L
+#define ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 constexpr
+#else
+#define ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
+#endif
+
 // ABSL_INTERNAL_EMSCRIPTEN_VERSION combines Emscripten's three version macros
 // into an integer that can be compared against.
 #ifdef ABSL_INTERNAL_EMSCRIPTEN_VERSION
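
These macros expand to `constexpr` only when the translation unit is compiled as C++17 (or C++20) or later, which is how the container algorithms annotated earlier in this patch (`c_distance`, `c_min_element`, etc.) gain constexpr support without breaking C++14 builds. A sketch mirroring the new tests in container_test.cc:

```cpp
#include <array>

#include "absl/algorithm/container.h"
#include "absl/base/config.h"

#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
    ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
// Under C++17 the annotated algorithms are usable in constant expressions.
constexpr std::array<int, 3> kVals = {3, 1, 2};
static_assert(absl::c_distance(std::array<int, 4>()) == 4);
static_assert(*absl::c_min_element(kVals) == 1);
#endif
```
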
diff --git a/absl/base/dynamic_annotations.h b/absl/base/dynamic_annotations.h
index 7ba8912..f18b5e0 100644
--- a/absl/base/dynamic_annotations.h
+++ b/absl/base/dynamic_annotations.h
@@ -252,25 +252,9 @@
 
 #else  // !defined(ABSL_HAVE_MEMORY_SANITIZER)
 
-// TODO(rogeeff): remove this branch
-#ifdef ABSL_HAVE_THREAD_SANITIZER
-#define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
-  do {                                                     \
-    (void)(address);                                       \
-    (void)(size);                                          \
-  } while (0)
-#define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
-  do {                                                       \
-    (void)(address);                                         \
-    (void)(size);                                            \
-  } while (0)
-#else
-
 #define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size)    // empty
 #define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size)  // empty
 
-#endif
-
 #endif  // ABSL_HAVE_MEMORY_SANITIZER
 
 // -------------------------------------------------------------------------
diff --git a/absl/base/internal/nullability_impl.h b/absl/base/internal/nullability_impl.h
index 36e1b33..03fa243 100644
--- a/absl/base/internal/nullability_impl.h
+++ b/absl/base/internal/nullability_impl.h
@@ -19,10 +19,11 @@
 #include <type_traits>
 
 #include "absl/base/attributes.h"
+#include "absl/base/config.h"
 #include "absl/meta/type_traits.h"
 
 namespace absl {
-
+ABSL_NAMESPACE_BEGIN
 namespace nullability_internal {
 
 // `IsNullabilityCompatible` checks whether its first argument is a class
@@ -101,6 +102,7 @@
     = T;
 
 }  // namespace nullability_internal
+ABSL_NAMESPACE_END
 }  // namespace absl
 
 #endif  // ABSL_BASE_INTERNAL_NULLABILITY_IMPL_H_
diff --git a/absl/base/internal/poison.cc b/absl/base/internal/poison.cc
new file mode 100644
index 0000000..b33d4c2
--- /dev/null
+++ b/absl/base/internal/poison.cc
@@ -0,0 +1,84 @@
+// Copyright 2024 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/poison.h"
+
+#include <cstdlib>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/direct_mmap.h"
+
+#ifndef _WIN32
+#include <unistd.h>
+#endif
+
+#if defined(ABSL_HAVE_ADDRESS_SANITIZER)
+#include <sanitizer/asan_interface.h>
+#elif defined(ABSL_HAVE_MEMORY_SANITIZER)
+#include <sanitizer/msan_interface.h>
+#elif defined(ABSL_HAVE_MMAP)
+#include <sys/mman.h>
+#endif
+
+#if defined(_WIN32)
+#include <windows.h>
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+namespace {
+
+size_t GetPageSize() {
+#ifdef _WIN32
+  SYSTEM_INFO system_info;
+  GetSystemInfo(&system_info);
+  return system_info.dwPageSize;
+#elif defined(__wasm__) || defined(__asmjs__) || defined(__hexagon__)
+  return getpagesize();
+#else
+  return static_cast<size_t>(sysconf(_SC_PAGESIZE));
+#endif
+}
+
+}  // namespace
+
+void* InitializePoisonedPointerInternal() {
+  const size_t block_size = GetPageSize();
+#if defined(ABSL_HAVE_ADDRESS_SANITIZER)
+  void* data = malloc(block_size);
+  ASAN_POISON_MEMORY_REGION(data, block_size);
+#elif defined(ABSL_HAVE_MEMORY_SANITIZER)
+  void* data = malloc(block_size);
+  __msan_poison(data, block_size);
+#elif defined(ABSL_HAVE_MMAP)
+  void* data = DirectMmap(nullptr, block_size, PROT_NONE,
+                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  if (data == MAP_FAILED) return GetBadPointerInternal();
+#elif defined(_WIN32)
+  void* data = VirtualAlloc(nullptr, block_size, MEM_RESERVE | MEM_COMMIT,
+                            PAGE_NOACCESS);
+  if (data == nullptr) return GetBadPointerInternal();
+#else
+  return GetBadPointerInternal();
+#endif
+  // Return the middle of the block so that dereferences before and after the
+  // pointer will both crash.
+  return static_cast<char*>(data) + block_size / 2;
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/absl/base/internal/poison.h b/absl/base/internal/poison.h
new file mode 100644
index 0000000..28113bd
--- /dev/null
+++ b/absl/base/internal/poison.h
@@ -0,0 +1,59 @@
+// Copyright 2024 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_POISON_H_
+#define ABSL_BASE_INTERNAL_POISON_H_
+
+#include <cstdint>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+inline void* GetBadPointerInternal() {
+  // A likely bad pointer. Pointers are required to have high bits that are all
+  // zero or all one for certain 64-bit CPUs. This pointer value will hopefully
+  // cause a crash on dereference and also be clearly recognizable as invalid.
+  constexpr uint64_t kBadPtr = 0xBAD0BAD0BAD0BAD0;
+  auto ret = reinterpret_cast<void*>(static_cast<uintptr_t>(kBadPtr));
+#ifndef _MSC_VER  // MSVC doesn't support inline asm with `volatile`.
+  // Try to prevent the compiler from optimizing out the undefined behavior.
+  asm volatile("" : : "r"(ret) :);  // NOLINT
+#endif
+  return ret;
+}
+
+void* InitializePoisonedPointerInternal();
+
+inline void* get_poisoned_pointer() {
+#if defined(NDEBUG) && !defined(ABSL_HAVE_ADDRESS_SANITIZER) && \
+    !defined(ABSL_HAVE_MEMORY_SANITIZER)
+  // In optimized non-sanitized builds, avoid the function-local static because
+  // of the codegen and runtime cost.
+  return GetBadPointerInternal();
+#else
+  // Non-optimized builds may use a more robust implementation. Note that we
+  // can't use a static global because Chromium doesn't allow non-constinit globals.
+  static void* ptr = InitializePoisonedPointerInternal();
+  return ptr;
+#endif
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_POISON_H_
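For context, a minimal sketch of how the new `get_poisoned_pointer()` helper might be used; `SlotIterator` is a hypothetical name, not part of the patch:

#include "absl/base/internal/poison.h"

// Hypothetical internal iterator: until it is bound to a real slot it points
// at poisoned memory, so an accidental dereference crashes loudly (or trips
// ASan/MSan) instead of silently reading garbage.
class SlotIterator {
 public:
  SlotIterator()
      : slot_(static_cast<int*>(absl::base_internal::get_poisoned_pointer())) {}
  explicit SlotIterator(int* slot) : slot_(slot) {}

  // Crashes when called on a default-constructed iterator.
  int& operator*() const { return *slot_; }

 private:
  int* slot_;
};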
diff --git a/absl/base/internal/poison_test.cc b/absl/base/internal/poison_test.cc
new file mode 100644
index 0000000..6596b45
--- /dev/null
+++ b/absl/base/internal/poison_test.cc
@@ -0,0 +1,41 @@
+// Copyright 2024 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/poison.h"
+
+#include <iostream>
+
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+namespace {
+
+TEST(PoisonTest, CrashesOnDereference) {
+#ifdef __ANDROID__
+  GTEST_SKIP() << "On Android, poisoned pointer dereference times out instead "
+                  "of crashing.";
+#endif
+  int* poisoned_ptr = static_cast<int*>(get_poisoned_pointer());
+  EXPECT_DEATH_IF_SUPPORTED(std::cout << *poisoned_ptr, "");
+  EXPECT_DEATH_IF_SUPPORTED(std::cout << *(poisoned_ptr - 10), "");
+  EXPECT_DEATH_IF_SUPPORTED(std::cout << *(poisoned_ptr + 10), "");
+}
+
+}  // namespace
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/absl/base/internal/spinlock.h b/absl/base/internal/spinlock.h
index 2929cd6..1bb260f 100644
--- a/absl/base/internal/spinlock.h
+++ b/absl/base/internal/spinlock.h
@@ -53,7 +53,7 @@
 ABSL_NAMESPACE_BEGIN
 namespace base_internal {
 
-class ABSL_LOCKABLE SpinLock {
+class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock {
  public:
   SpinLock() : lockword_(kSpinLockCooperative) {
     ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
@@ -89,7 +89,8 @@
   // acquisition was successful.  If the lock was not acquired, false is
   // returned.  If this SpinLock is free at the time of the call, TryLock
   // will return true with high probability.
-  inline bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
+  ABSL_MUST_USE_RESULT inline bool TryLock()
+      ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
     ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
     bool res = TryLockImpl();
     ABSL_TSAN_MUTEX_POST_LOCK(
@@ -120,7 +121,7 @@
   // Determine if the lock is held.  When the lock is held by the invoking
   // thread, true will always be returned. Intended to be used as
   // CHECK(lock.IsHeld()).
-  inline bool IsHeld() const {
+  ABSL_MUST_USE_RESULT inline bool IsHeld() const {
     return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
   }
 
@@ -202,6 +203,15 @@
 
 // Corresponding locker object that arranges to acquire a spinlock for
 // the duration of a C++ scope.
+//
+// TODO(b/176172494): Use only [[nodiscard]] when baseline is raised.
+// TODO(b/6695610): Remove forward declaration when #ifdef is no longer needed.
+#if ABSL_HAVE_CPP_ATTRIBUTE(nodiscard)
+class [[nodiscard]] SpinLockHolder;
+#else
+class ABSL_MUST_USE_RESULT ABSL_ATTRIBUTE_TRIVIAL_ABI SpinLockHolder;
+#endif
+
 class ABSL_SCOPED_LOCKABLE SpinLockHolder {
  public:
   inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l)
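A short usage sketch (not part of the patch) of what the new `ABSL_MUST_USE_RESULT`/`[[nodiscard]]` annotations are meant to catch; `counter` and `counter_lock` are illustrative names:

#include "absl/base/internal/spinlock.h"

absl::base_internal::SpinLock counter_lock;
int counter = 0;

void Increment() {
  // A *named* holder keeps the lock for this scope. Constructing an unnamed
  // SpinLockHolder temporary (which would unlock immediately) is now caught
  // by the [[nodiscard]] annotation.
  absl::base_internal::SpinLockHolder hold(&counter_lock);
  ++counter;
}

bool TryIncrement() {
  // Calling TryLock() without checking the result is now flagged by
  // ABSL_MUST_USE_RESULT, since the lock may not have been acquired.
  if (!counter_lock.TryLock()) return false;
  ++counter;
  counter_lock.Unlock();
  return true;
}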
diff --git a/absl/base/internal/unscaledcycleclock.cc b/absl/base/internal/unscaledcycleclock.cc
index 05e0e7b..a0bf3a6 100644
--- a/absl/base/internal/unscaledcycleclock.cc
+++ b/absl/base/internal/unscaledcycleclock.cc
@@ -121,18 +121,6 @@
   return aarch64_timer_frequency;
 }
 
-#elif defined(__riscv)
-
-int64_t UnscaledCycleClock::Now() {
-  int64_t virtual_timer_value;
-  asm volatile("rdcycle %0" : "=r"(virtual_timer_value));
-  return virtual_timer_value;
-}
-
-double UnscaledCycleClock::Frequency() {
-  return base_internal::NominalCPUFrequency();
-}
-
 #elif defined(_M_IX86) || defined(_M_X64)
 
 #pragma intrinsic(__rdtsc)
diff --git a/absl/base/internal/unscaledcycleclock_config.h b/absl/base/internal/unscaledcycleclock_config.h
index 24b324a..43a3dab 100644
--- a/absl/base/internal/unscaledcycleclock_config.h
+++ b/absl/base/internal/unscaledcycleclock_config.h
@@ -21,8 +21,8 @@
 
 // The following platforms have an implementation of a hardware counter.
 #if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
-    defined(__powerpc__) || defined(__ppc__) || defined(__riscv) ||     \
-    defined(_M_IX86) || (defined(_M_X64) && !defined(_M_ARM64EC))
+    defined(__powerpc__) || defined(__ppc__) || defined(_M_IX86) ||     \
+    (defined(_M_X64) && !defined(_M_ARM64EC))
 #define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 1
 #else
 #define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 0
@@ -53,8 +53,8 @@
 #if ABSL_USE_UNSCALED_CYCLECLOCK
 // This macro can be used to test if UnscaledCycleClock::Frequency()
 // is NominalCPUFrequency() on a particular platform.
-#if (defined(__i386__) || defined(__x86_64__) || defined(__riscv) || \
-     defined(_M_IX86) || defined(_M_X64))
+#if (defined(__i386__) || defined(__x86_64__) || defined(_M_IX86) || \
+     defined(_M_X64))
 #define ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
 #endif
 #endif
diff --git a/absl/base/macros.h b/absl/base/macros.h
index f33cd19..b318f11 100644
--- a/absl/base/macros.h
+++ b/absl/base/macros.h
@@ -138,4 +138,52 @@
 #define ABSL_INTERNAL_RETHROW do {} while (false)
 #endif  // ABSL_HAVE_EXCEPTIONS
 
+// ABSL_DEPRECATE_AND_INLINE()
+//
+// Marks a function or type alias as deprecated and tags it to be picked up for
+// automated refactoring by go/cpp-inliner. It can added to inline function
+// definitions or type aliases. It should only be used within a header file. It
+// differs from `ABSL_DEPRECATED` in the following ways:
+//
+// 1. New uses of the function or type will be discouraged via Tricorder
+//    warnings.
+// 2. If enabled via `METADATA`, automated changes will be sent out inlining the
+//    function's body or replacing the type where it is used.
+//
+// For example:
+//
+// ABSL_DEPRECATE_AND_INLINE() inline int OldFunc(int x) {
+//   return NewFunc(x, 0);
+// }
+//
+// will mark `OldFunc` as deprecated, and the go/cpp-inliner service will
+// replace calls to `OldFunc(x)` with calls to `NewFunc(x, 0)`. Once all calls
+// to `OldFunc` have been replaced, `OldFunc` can be deleted.
+//
+// See go/cpp-inliner for more information.
+//
+// Note: go/cpp-inliner is a Google-internal service for automated refactoring.
+// While open-source users do not have access to this service, the macro is
+// provided for compatibility, and so that users receive deprecation warnings.
+#if ABSL_HAVE_CPP_ATTRIBUTE(deprecated) && \
+    ABSL_HAVE_CPP_ATTRIBUTE(clang::annotate)
+#define ABSL_DEPRECATE_AND_INLINE() [[deprecated, clang::annotate("inline-me")]]
+#elif ABSL_HAVE_CPP_ATTRIBUTE(deprecated)
+#define ABSL_DEPRECATE_AND_INLINE() [[deprecated]]
+#else
+#define ABSL_DEPRECATE_AND_INLINE()
+#endif
+
+// Requires the compiler to prove that the size of the given object is at least
+// the expected amount.
+#if ABSL_HAVE_ATTRIBUTE(diagnose_if) && ABSL_HAVE_BUILTIN(__builtin_object_size)
+#define ABSL_INTERNAL_NEED_MIN_SIZE(Obj, N)                     \
+  __attribute__((diagnose_if(__builtin_object_size(Obj, 0) < N, \
+                             "object size provably too small "  \
+                             "(this would corrupt memory)",     \
+                             "error")))
+#else
+#define ABSL_INTERNAL_NEED_MIN_SIZE(Obj, N)
+#endif
+
 #endif  // ABSL_BASE_MACROS_H_
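A hedged sketch of how the new `ABSL_INTERNAL_NEED_MIN_SIZE` annotation might be attached to a declaration; `FillHeader` and the 16-byte requirement are made up for illustration:

#include <cstring>

#include "absl/base/macros.h"

// The annotation asks clang (when diagnose_if and __builtin_object_size are
// available) to reject calls whose destination buffer is provably smaller
// than 16 bytes; on other compilers it expands to nothing.
void FillHeader(void* dst) ABSL_INTERNAL_NEED_MIN_SIZE(dst, 16);

void FillHeader(void* dst) { std::memset(dst, 0, 16); }

void Example() {
  char big[32];
  FillHeader(big);  // OK: 32 bytes is provably enough.
  // char tiny[8];
  // FillHeader(tiny);  // Would be a compile-time error under clang.
}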
diff --git a/absl/base/no_destructor.h b/absl/base/no_destructor.h
index d4b16a6..43b3540 100644
--- a/absl/base/no_destructor.h
+++ b/absl/base/no_destructor.h
@@ -21,14 +21,13 @@
 // such an object survives during program exit (and can be safely accessed at
 // any time).
 //
-// Objects of such type, if constructed safely and under the right conditions,
-// provide two main benefits over other alternatives:
-//
-//   * Global objects not normally allowed due to concerns of destruction order
-//     (i.e. no "complex globals") can be safely allowed, provided that such
-//     objects can be constant initialized.
-//   * Function scope static objects can be optimized to avoid heap allocation,
-//     pointer chasing, and allow lazy construction.
+// absl::NoDestructor<T> is useful when a variable has static storage
+// duration but its type has a non-trivial destructor. Global constructors are
+// not recommended because of C++'s static initialization order fiasco (see
+// https://en.cppreference.com/w/cpp/language/siof). Global destructors are not
+// allowed due to similar concerns about destruction ordering. Using
+// absl::NoDestructor<T> as a function-local static prevents both of these
+// issues.
 //
 // See below for complete details.
 
@@ -41,6 +40,7 @@
 #include <utility>
 
 #include "absl/base/config.h"
+#include "absl/base/nullability.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -49,8 +49,8 @@
 //
 // NoDestructor<T> is a wrapper around an object of type T that behaves as an
 // object of type T but never calls T's destructor. NoDestructor<T> makes it
-// safer and/or more efficient to use such objects in static storage contexts:
-// as global or function scope static variables.
+// safer and/or more efficient to use such objects in static storage contexts,
+// ideally as function scope static variables.
 //
 // An instance of absl::NoDestructor<T> has similar type semantics to an
 // instance of T:
@@ -61,9 +61,6 @@
 //   `->`, `*`, and `get()`.
 //   (Note that `const NoDestructor<T>` works like a pointer to const `T`.)
 //
-// An object of type NoDestructor<T> should be defined in static storage:
-// as either a global static object, or as a function scope static variable.
-//
 // Additionally, NoDestructor<T> provides the following benefits:
 //
 // * Never calls T's destructor for the object
@@ -71,24 +68,7 @@
 //   lazily constructed.
 //
 // An object of type NoDestructor<T> is "trivially destructible" in the notion
-// that its destructor is never run. Provided that an object of this type can be
-// safely initialized and does not need to be cleaned up on program shutdown,
-// NoDestructor<T> allows you to define global static variables, since Google's
-// C++ style guide ban on such objects doesn't apply to objects that are
-// trivially destructible.
-//
-// Usage as Global Static Variables
-//
-// NoDestructor<T> allows declaration of a global object with a non-trivial
-// constructor in static storage without needing to add a destructor.
-// However, such objects still need to worry about initialization order, so
-// such objects should be const initialized:
-//
-//    // Global or namespace scope.
-//    ABSL_CONST_INIT absl::NoDestructor<MyRegistry> reg{"foo", "bar", 8008};
-//
-// Note that if your object already has a trivial destructor, you don't need to
-// use NoDestructor<T>.
+// that its destructor is never run.
 //
 // Usage as Function Scope Static Variables
 //
@@ -114,6 +94,21 @@
 //     return *x;
 //   }
 //
+// Usage as Global Static Variables
+//
+// NoDestructor<T> allows declaration of a global object of type T that has a
+// non-trivial destructor since its destructor is never run. However, such
+// objects still need to worry about initialization order, so such use is not
+// recommended, strongly discouraged by the Google C++ Style Guide, and outright
+// banned in Chromium.
+// See https://google.github.io/styleguide/cppguide.html#Static_and_Global_Variables
+//
+//    // Global or namespace scope.
+//    absl::NoDestructor<MyRegistry> reg{"foo", "bar", 8008};
+//
+// Note that if your object already has a trivial destructor, you don't need to
+// use NoDestructor<T>.
+//
 template <typename T>
 class NoDestructor {
  public:
@@ -140,11 +135,11 @@
   // Pretend to be a smart pointer to T with deep constness.
   // Never returns a null pointer.
   T& operator*() { return *get(); }
-  T* operator->() { return get(); }
-  T* get() { return impl_.get(); }
+  absl::Nonnull<T*> operator->() { return get(); }
+  absl::Nonnull<T*> get() { return impl_.get(); }
   const T& operator*() const { return *get(); }
-  const T* operator->() const { return get(); }
-  const T* get() const { return impl_.get(); }
+  absl::Nonnull<const T*> operator->() const { return get(); }
+  absl::Nonnull<const T*> get() const { return impl_.get(); }
 
  private:
   class DirectImpl {
@@ -152,8 +147,8 @@
     template <typename... Args>
     explicit constexpr DirectImpl(Args&&... args)
         : value_(std::forward<Args>(args)...) {}
-    const T* get() const { return &value_; }
-    T* get() { return &value_; }
+    absl::Nonnull<const T*> get() const { return &value_; }
+    absl::Nonnull<T*> get() { return &value_; }
 
    private:
     T value_;
@@ -165,14 +160,14 @@
     explicit PlacementImpl(Args&&... args) {
       new (&space_) T(std::forward<Args>(args)...);
     }
-    const T* get() const {
+    absl::Nonnull<const T*> get() const {
       return Launder(reinterpret_cast<const T*>(&space_));
     }
-    T* get() { return Launder(reinterpret_cast<T*>(&space_)); }
+    absl::Nonnull<T*> get() { return Launder(reinterpret_cast<T*>(&space_)); }
 
    private:
     template <typename P>
-    static P* Launder(P* p) {
+    static absl::Nonnull<P*> Launder(absl::Nonnull<P*> p) {
 #if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606L
       return std::launder(p);
 #elif ABSL_HAVE_BUILTIN(__builtin_launder)
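A minimal sketch of the function-local-static pattern the revised comments recommend; `DefaultGreeting` is an illustrative helper, not from the patch:

#include <string>

#include "absl/base/no_destructor.h"

// Lazily constructed on first call and never destroyed, so there is no
// static-destruction-order hazard at program exit.
const std::string& DefaultGreeting() {
  static const absl::NoDestructor<std::string> kGreeting("hello, world");
  return *kGreeting;
}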
diff --git a/absl/base/nullability.h b/absl/base/nullability.h
index 6f49b6f..34dc083 100644
--- a/absl/base/nullability.h
+++ b/absl/base/nullability.h
@@ -128,9 +128,17 @@
 //
 // By default, nullability annotations are applicable to raw and smart
 // pointers. User-defined types can indicate compatibility with nullability
-// annotations by providing an `absl_nullability_compatible` nested type. The
-// actual definition of this inner type is not relevant as it is used merely as
-// a marker. It is common to use a using declaration of
+// annotations by adding the ABSL_NULLABILITY_COMPATIBLE attribute.
+//
+// // Example:
+// struct ABSL_NULLABILITY_COMPATIBLE MyPtr {
+//   ...
+// };
+//
+// Note: For the time being, nullability-compatible classes should additionally
+// be marked with an `absl_nullability_compatible` nested type (this will soon
+// be deprecated). The actual definition of this inner type is not relevant as
+// it is used merely as a marker. It is common to use a using declaration of
 // `absl_nullability_compatible` set to void.
 //
 // // Example:
@@ -150,14 +158,16 @@
 #ifndef ABSL_BASE_NULLABILITY_H_
 #define ABSL_BASE_NULLABILITY_H_
 
+#include "absl/base/config.h"
 #include "absl/base/internal/nullability_impl.h"
 
 namespace absl {
+ABSL_NAMESPACE_BEGIN
 
 // absl::Nonnull
 //
 // The indicated pointer is never null. It is the responsibility of the provider
-// of this pointer across an API boundary to ensure that the pointer is never be
+// of this pointer across an API boundary to ensure that the pointer is never
 // set to null. Consumers of this pointer across an API boundary may safely
 // dereference the pointer.
 //
@@ -198,9 +208,9 @@
 // migrated into one of the above two nullability states: `Nonnull<T>` or
 //  `Nullable<T>`.
 //
-// NOTE: Because this annotation is the global default state, pointers without
-// any annotation are assumed to have "unknown" semantics. This assumption is
-// designed to minimize churn and reduce clutter within the codebase.
+// NOTE: Because this annotation is the global default state, unannotated
+// pointers are assumed to have "unknown" semantics. This assumption is designed
+// to minimize churn and reduce clutter within the codebase.
 //
 // Example:
 //
@@ -219,6 +229,22 @@
 template <typename T>
 using NullabilityUnknown = nullability_internal::NullabilityUnknownImpl<T>;
 
+ABSL_NAMESPACE_END
 }  // namespace absl
 
+// ABSL_NULLABILITY_COMPATIBLE
+//
+// Indicates that a class is compatible with nullability annotations.
+//
+// For example:
+//
+// struct ABSL_NULLABILITY_COMPATIBLE MyPtr {
+//   ...
+// };
+#if ABSL_HAVE_FEATURE(nullability_on_classes)
+#define ABSL_NULLABILITY_COMPATIBLE _Nullable
+#else
+#define ABSL_NULLABILITY_COMPATIBLE
+#endif
+
 #endif  // ABSL_BASE_NULLABILITY_H_
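A small sketch combining the annotations, assuming a hypothetical `Employee` type and a custom pointer-like `MyIntPtr`:

#include "absl/base/nullability.h"

struct Employee;

// The argument must never be null; the result may be null (no manager found).
absl::Nullable<Employee*> FindManager(absl::Nonnull<const Employee*> employee);

// A user-defined pointer-like type opting into nullability annotations.
struct ABSL_NULLABILITY_COMPATIBLE MyIntPtr {
  // Still needed for now, per the note above; slated for deprecation.
  using absl_nullability_compatible = void;
  int* ptr = nullptr;
};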
diff --git a/absl/base/optimization.h b/absl/base/optimization.h
index f985995..3aa66e1 100644
--- a/absl/base/optimization.h
+++ b/absl/base/optimization.h
@@ -18,12 +18,23 @@
 // -----------------------------------------------------------------------------
 //
 // This header file defines portable macros for performance optimization.
+//
+// This header is included in both C++ code and legacy C code and thus must
+// remain compatible with both C and C++. C compatibility will be removed if
+// the legacy code is removed or converted to C++. Do not include this header
+// in new code that requires C compatibility, and do not assume that C
+// compatibility will remain indefinitely.
 
 #ifndef ABSL_BASE_OPTIMIZATION_H_
 #define ABSL_BASE_OPTIMIZATION_H_
 
 #include <assert.h>
 
+#ifdef __cplusplus
+// Included for std::unreachable()
+#include <utility>
+#endif  // __cplusplus
+
 #include "absl/base/config.h"
 #include "absl/base/options.h"
 
diff --git a/absl/base/options.h b/absl/base/options.h
index 67cbf45..3ea6045 100644
--- a/absl/base/options.h
+++ b/absl/base/options.h
@@ -226,7 +226,7 @@
 // allowed.
 
 #define ABSL_OPTION_USE_INLINE_NAMESPACE 1
-#define ABSL_OPTION_INLINE_NAMESPACE_NAME lts_20240116
+#define ABSL_OPTION_INLINE_NAMESPACE_NAME lts_20240722
 
 // ABSL_OPTION_HARDENED
 //
diff --git a/absl/base/prefetch.h b/absl/base/prefetch.h
index eb40a44..482cde3 100644
--- a/absl/base/prefetch.h
+++ b/absl/base/prefetch.h
@@ -129,7 +129,7 @@
 //
 //  void* Arena::Allocate(size_t size) {
 //    void* ptr = AllocateBlock(size);
-//    absl::PrefetchToLocalCacheForWrite(p);
+//    absl::PrefetchToLocalCacheForWrite(ptr);
 //    return ptr;
 //  }
 //
diff --git a/absl/cleanup/cleanup_test.cc b/absl/cleanup/cleanup_test.cc
index 46b8858..72d7ff2 100644
--- a/absl/cleanup/cleanup_test.cc
+++ b/absl/cleanup/cleanup_test.cc
@@ -48,7 +48,7 @@
   explicit FunctorClass(Callback callback) : callback_(std::move(callback)) {}
 
   FunctorClass(FunctorClass&& other)
-      : callback_(absl::exchange(other.callback_, Callback())) {}
+      : callback_(std::exchange(other.callback_, Callback())) {}
 
   FunctorClass(const FunctorClass&) = delete;
 
diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel
index 0ba2fa7..b00c30f 100644
--- a/absl/container/BUILD.bazel
+++ b/absl/container/BUILD.bazel
@@ -108,7 +108,7 @@
 
 cc_binary(
     name = "fixed_array_benchmark",
-    testonly = 1,
+    testonly = True,
     srcs = ["fixed_array_benchmark.cc"],
     copts = ABSL_TEST_COPTS + ["$(STACK_FRAME_UNLIMITED)"],
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -126,6 +126,7 @@
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         ":compressed_tuple",
+        "//absl/base:base_internal",
         "//absl/base:config",
         "//absl/base:core_headers",
         "//absl/memory",
@@ -151,7 +152,7 @@
 
 cc_library(
     name = "test_allocator",
-    testonly = 1,
+    testonly = True,
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
     textual_hdrs = ["internal/test_allocator.h"],
@@ -181,7 +182,7 @@
 
 cc_binary(
     name = "inlined_vector_benchmark",
-    testonly = 1,
+    testonly = True,
     srcs = ["inlined_vector_benchmark.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -210,7 +211,7 @@
 
 cc_library(
     name = "test_instance_tracker",
-    testonly = 1,
+    testonly = True,
     srcs = ["internal/test_instance_tracker.cc"],
     hdrs = ["internal/test_instance_tracker.h"],
     copts = ABSL_DEFAULT_COPTS,
@@ -247,11 +248,11 @@
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         ":container_memory",
-        ":hash_function_defaults",
+        ":hash_container_defaults",
         ":raw_hash_map",
         "//absl/algorithm:container",
         "//absl/base:core_headers",
-        "//absl/memory",
+        "//absl/meta:type_traits",
     ],
 )
 
@@ -264,10 +265,13 @@
     deps = [
         ":flat_hash_map",
         ":hash_generator_testing",
+        ":hash_policy_testing",
+        ":test_allocator",
         ":unordered_map_constructor_test",
         ":unordered_map_lookup_test",
         ":unordered_map_members_test",
         ":unordered_map_modifiers_test",
+        "//absl/base:config",
         "//absl/log:check",
         "//absl/meta:type_traits",
         "//absl/types:any",
@@ -283,11 +287,12 @@
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         ":container_memory",
-        ":hash_function_defaults",
+        ":hash_container_defaults",
         ":raw_hash_set",
         "//absl/algorithm:container",
         "//absl/base:core_headers",
         "//absl/memory",
+        "//absl/meta:type_traits",
     ],
 )
 
@@ -301,6 +306,7 @@
         ":container_memory",
         ":flat_hash_set",
         ":hash_generator_testing",
+        ":test_allocator",
         ":unordered_set_constructor_test",
         ":unordered_set_lookup_test",
         ":unordered_set_members_test",
@@ -321,12 +327,13 @@
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         ":container_memory",
-        ":hash_function_defaults",
+        ":hash_container_defaults",
         ":node_slot_policy",
         ":raw_hash_map",
         "//absl/algorithm:container",
         "//absl/base:core_headers",
         "//absl/memory",
+        "//absl/meta:type_traits",
     ],
 )
 
@@ -337,13 +344,14 @@
     linkopts = ABSL_DEFAULT_LINKOPTS,
     tags = ["no_test_loonix"],
     deps = [
-        ":hash_generator_testing",
+        ":hash_policy_testing",
         ":node_hash_map",
         ":tracked",
         ":unordered_map_constructor_test",
         ":unordered_map_lookup_test",
         ":unordered_map_members_test",
         ":unordered_map_modifiers_test",
+        "//absl/base:config",
         "@com_google_googletest//:gtest",
         "@com_google_googletest//:gtest_main",
     ],
@@ -355,12 +363,14 @@
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
-        ":hash_function_defaults",
+        ":container_memory",
+        ":hash_container_defaults",
         ":node_slot_policy",
         ":raw_hash_set",
         "//absl/algorithm:container",
         "//absl/base:core_headers",
         "//absl/memory",
+        "//absl/meta:type_traits",
     ],
 )
 
@@ -371,11 +381,15 @@
     linkopts = ABSL_DEFAULT_LINKOPTS,
     tags = ["no_test_loonix"],
     deps = [
+        ":hash_generator_testing",
+        ":hash_policy_testing",
         ":node_hash_set",
         ":unordered_set_constructor_test",
         ":unordered_set_lookup_test",
         ":unordered_set_members_test",
         ":unordered_set_modifiers_test",
+        "//absl/base:config",
+        "//absl/memory",
         "@com_google_googletest//:gtest",
         "@com_google_googletest//:gtest_main",
     ],
@@ -420,13 +434,26 @@
         "//visibility:private",
     ],
     deps = [
+        ":common",
         "//absl/base:config",
         "//absl/hash",
+        "//absl/meta:type_traits",
         "//absl/strings",
         "//absl/strings:cord",
     ],
 )
 
+cc_library(
+    name = "hash_container_defaults",
+    hdrs = ["hash_container_defaults.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":hash_function_defaults",
+        "//absl/base:config",
+    ],
+)
+
 cc_test(
     name = "hash_function_defaults_test",
     srcs = ["internal/hash_function_defaults_test.cc"],
@@ -434,6 +461,8 @@
     linkopts = ABSL_DEFAULT_LINKOPTS,
     tags = NOTEST_TAGS_MOBILE + ["no_test_loonix"],
     deps = [
+        ":flat_hash_map",
+        ":flat_hash_set",
         ":hash_function_defaults",
         "//absl/hash",
         "//absl/random",
@@ -447,7 +476,7 @@
 
 cc_library(
     name = "hash_generator_testing",
-    testonly = 1,
+    testonly = True,
     srcs = ["internal/hash_generator_testing.cc"],
     hdrs = ["internal/hash_generator_testing.h"],
     copts = ABSL_TEST_COPTS,
@@ -463,7 +492,7 @@
 
 cc_library(
     name = "hash_policy_testing",
-    testonly = 1,
+    testonly = True,
     hdrs = ["internal/hash_policy_testing.h"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -502,6 +531,7 @@
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
+        ":container_memory",
         ":hash_policy_traits",
         "@com_google_googletest//:gtest",
         "@com_google_googletest//:gtest_main",
@@ -563,6 +593,7 @@
         "//absl/base",
         "//absl/base:config",
         "//absl/base:core_headers",
+        "//absl/base:no_destructor",
         "//absl/base:raw_logging_internal",
         "//absl/debugging:stacktrace",
         "//absl/memory",
@@ -686,14 +717,18 @@
         ":hash_policy_testing",
         ":hashtable_debug",
         ":hashtablez_sampler",
+        ":node_hash_set",
         ":raw_hash_set",
         ":test_allocator",
+        ":test_instance_tracker",
         "//absl/base",
         "//absl/base:config",
         "//absl/base:core_headers",
         "//absl/base:prefetch",
+        "//absl/functional:function_ref",
         "//absl/hash",
         "//absl/log",
+        "//absl/log:check",
         "//absl/memory",
         "//absl/meta:type_traits",
         "//absl/strings",
@@ -704,16 +739,18 @@
 
 cc_binary(
     name = "raw_hash_set_benchmark",
-    testonly = 1,
+    testonly = True,
     srcs = ["internal/raw_hash_set_benchmark.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
     tags = ["benchmark"],
     visibility = ["//visibility:private"],
     deps = [
+        ":container_memory",
         ":hash_function_defaults",
         ":raw_hash_set",
         "//absl/base:raw_logging_internal",
+        "//absl/random",
         "//absl/strings:str_format",
         "@com_github_google_benchmark//:benchmark_main",
     ],
@@ -721,7 +758,7 @@
 
 cc_binary(
     name = "raw_hash_set_probe_benchmark",
-    testonly = 1,
+    testonly = True,
     srcs = ["internal/raw_hash_set_probe_benchmark.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = select({
@@ -750,6 +787,7 @@
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
+        ":container_memory",
         ":raw_hash_set",
         ":tracked",
         "//absl/base:config",
@@ -795,7 +833,7 @@
 
 cc_binary(
     name = "layout_benchmark",
-    testonly = 1,
+    testonly = True,
     srcs = ["internal/layout_benchmark.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -811,7 +849,7 @@
 
 cc_library(
     name = "tracked",
-    testonly = 1,
+    testonly = True,
     hdrs = ["internal/tracked.h"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -822,7 +860,7 @@
 
 cc_library(
     name = "unordered_map_constructor_test",
-    testonly = 1,
+    testonly = True,
     hdrs = ["internal/unordered_map_constructor_test.h"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -835,7 +873,7 @@
 
 cc_library(
     name = "unordered_map_lookup_test",
-    testonly = 1,
+    testonly = True,
     hdrs = ["internal/unordered_map_lookup_test.h"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -848,7 +886,7 @@
 
 cc_library(
     name = "unordered_map_modifiers_test",
-    testonly = 1,
+    testonly = True,
     hdrs = ["internal/unordered_map_modifiers_test.h"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -861,7 +899,7 @@
 
 cc_library(
     name = "unordered_set_constructor_test",
-    testonly = 1,
+    testonly = True,
     hdrs = ["internal/unordered_set_constructor_test.h"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -875,7 +913,7 @@
 
 cc_library(
     name = "unordered_set_members_test",
-    testonly = 1,
+    testonly = True,
     hdrs = ["internal/unordered_set_members_test.h"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -887,7 +925,7 @@
 
 cc_library(
     name = "unordered_map_members_test",
-    testonly = 1,
+    testonly = True,
     hdrs = ["internal/unordered_map_members_test.h"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -899,7 +937,7 @@
 
 cc_library(
     name = "unordered_set_lookup_test",
-    testonly = 1,
+    testonly = True,
     hdrs = ["internal/unordered_set_lookup_test.h"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -912,7 +950,7 @@
 
 cc_library(
     name = "unordered_set_modifiers_test",
-    testonly = 1,
+    testonly = True,
     hdrs = ["internal/unordered_set_modifiers_test.h"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -991,6 +1029,7 @@
         ":compressed_tuple",
         ":container_memory",
         ":layout",
+        "//absl/base:config",
         "//absl/base:core_headers",
         "//absl/base:raw_logging_internal",
         "//absl/base:throw_delegate",
@@ -999,13 +1038,12 @@
         "//absl/strings",
         "//absl/strings:cord",
         "//absl/types:compare",
-        "//absl/utility",
     ],
 )
 
 cc_library(
     name = "btree_test_common",
-    testonly = 1,
+    testonly = True,
     hdrs = ["btree_test.h"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -1056,7 +1094,7 @@
 
 cc_binary(
     name = "btree_benchmark",
-    testonly = 1,
+    testonly = True,
     srcs = [
         "btree_benchmark.cc",
     ],
diff --git a/absl/container/CMakeLists.txt b/absl/container/CMakeLists.txt
index 128cc0e..25831d5 100644
--- a/absl/container/CMakeLists.txt
+++ b/absl/container/CMakeLists.txt
@@ -27,10 +27,11 @@
   LINKOPTS
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
-    absl::container_common
     absl::common_policy_traits
     absl::compare
     absl::compressed_tuple
+    absl::config
+    absl::container_common
     absl::container_memory
     absl::cord
     absl::core_headers
@@ -40,7 +41,6 @@
     absl::strings
     absl::throw_delegate
     absl::type_traits
-    absl::utility
 )
 
 # Internal-only target, do not depend on directly.
@@ -176,6 +176,7 @@
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
+    absl::base_internal
     absl::compressed_tuple
     absl::config
     absl::core_headers
@@ -213,6 +214,7 @@
   DEPS
     absl::config
     GTest::gmock
+  TESTONLY
 )
 
 absl_cc_test(
@@ -287,10 +289,10 @@
   DEPS
     absl::container_memory
     absl::core_headers
-    absl::hash_function_defaults
+    absl::hash_container_defaults
     absl::raw_hash_map
     absl::algorithm_container
-    absl::memory
+    absl::type_traits
   PUBLIC
 )
 
@@ -304,8 +306,11 @@
   DEPS
     absl::any
     absl::check
+    absl::config
     absl::flat_hash_map
     absl::hash_generator_testing
+    absl::hash_policy_testing
+    absl::test_allocator
     absl::type_traits
     absl::unordered_map_constructor_test
     absl::unordered_map_lookup_test
@@ -323,11 +328,12 @@
     ${ABSL_DEFAULT_COPTS}
   DEPS
     absl::container_memory
-    absl::hash_function_defaults
+    absl::hash_container_defaults
     absl::raw_hash_set
     absl::algorithm_container
     absl::core_headers
     absl::memory
+    absl::type_traits
   PUBLIC
 )
 
@@ -347,6 +353,7 @@
     absl::hash_generator_testing
     absl::memory
     absl::strings
+    absl::test_allocator
     absl::unordered_set_constructor_test
     absl::unordered_set_lookup_test
     absl::unordered_set_members_test
@@ -364,11 +371,12 @@
   DEPS
     absl::container_memory
     absl::core_headers
-    absl::hash_function_defaults
+    absl::hash_container_defaults
     absl::node_slot_policy
     absl::raw_hash_map
     absl::algorithm_container
     absl::memory
+    absl::type_traits
   PUBLIC
 )
 
@@ -380,7 +388,8 @@
   COPTS
     ${ABSL_TEST_COPTS}
   DEPS
-    absl::hash_generator_testing
+    absl::config
+    absl::hash_policy_testing
     absl::node_hash_map
     absl::tracked
     absl::unordered_map_constructor_test
@@ -398,12 +407,14 @@
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
+    absl::container_memory
     absl::core_headers
-    absl::hash_function_defaults
+    absl::hash_container_defaults
     absl::node_slot_policy
     absl::raw_hash_set
     absl::algorithm_container
     absl::memory
+    absl::type_traits
   PUBLIC
 )
 
@@ -417,7 +428,10 @@
     "-DUNORDERED_SET_CXX17"
   DEPS
     absl::hash_generator_testing
+    absl::hash_policy_testing
+    absl::memory
     absl::node_hash_set
+    absl::type_traits
     absl::unordered_set_constructor_test
     absl::unordered_set_lookup_test
     absl::unordered_set_members_test
@@ -425,6 +439,19 @@
     GTest::gmock_main
 )
 
+absl_cc_library(
+  NAME
+    hash_container_defaults
+  HDRS
+    "hash_container_defaults.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::config
+    absl::hash_function_defaults
+  PUBLIC
+)
+
 # Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
@@ -467,9 +494,11 @@
     ${ABSL_DEFAULT_COPTS}
   DEPS
     absl::config
+    absl::container_common
     absl::cord
     absl::hash
     absl::strings
+    absl::type_traits
   PUBLIC
 )
 
@@ -483,6 +512,8 @@
   DEPS
     absl::cord
     absl::cord_test_helpers
+    absl::flat_hash_map
+    absl::flat_hash_set
     absl::hash_function_defaults
     absl::hash
     absl::random_random
@@ -557,6 +588,7 @@
   COPTS
     ${ABSL_TEST_COPTS}
   DEPS
+    absl::container_memory
     absl::hash_policy_traits
     GTest::gmock_main
 )
@@ -602,6 +634,7 @@
     absl::base
     absl::config
     absl::exponential_biased
+    absl::no_destructor
     absl::raw_logging_internal
     absl::sample_recorder
     absl::synchronization
@@ -743,11 +776,13 @@
     ${ABSL_TEST_COPTS}
   DEPS
     absl::base
+    absl::check
     absl::config
     absl::container_memory
     absl::core_headers
     absl::flat_hash_map
     absl::flat_hash_set
+    absl::function_ref
     absl::hash
     absl::hash_function_defaults
     absl::hash_policy_testing
@@ -755,10 +790,12 @@
     absl::hashtablez_sampler
     absl::log
     absl::memory
+    absl::node_hash_set
     absl::prefetch
     absl::raw_hash_set
     absl::strings
     absl::test_allocator
+    absl::test_instance_tracker
     absl::type_traits
     GTest::gmock_main
 )
@@ -772,6 +809,7 @@
     ${ABSL_TEST_COPTS}
   DEPS
     absl::config
+    absl::container_memory
     absl::raw_hash_set
     absl::tracked
     GTest::gmock_main
diff --git a/absl/container/btree_map.h b/absl/container/btree_map.h
index 0f62f0b..b959b67 100644
--- a/absl/container/btree_map.h
+++ b/absl/container/btree_map.h
@@ -49,6 +49,8 @@
 //
 // Another API difference is that btree iterators can be subtracted, and this
 // is faster than using std::distance.
+//
+// B-tree maps are not exception-safe.
 
 #ifndef ABSL_CONTAINER_BTREE_MAP_H_
 #define ABSL_CONTAINER_BTREE_MAP_H_
@@ -85,7 +87,7 @@
 //
 template <typename Key, typename Value, typename Compare = std::less<Key>,
           typename Alloc = std::allocator<std::pair<const Key, Value>>>
-class btree_map
+class ABSL_INTERNAL_ATTRIBUTE_OWNER btree_map
     : public container_internal::btree_map_container<
           container_internal::btree<container_internal::map_params<
               Key, Value, Compare, Alloc, /*TargetNodeSize=*/256,
@@ -523,7 +525,7 @@
 //
 template <typename Key, typename Value, typename Compare = std::less<Key>,
           typename Alloc = std::allocator<std::pair<const Key, Value>>>
-class btree_multimap
+class ABSL_INTERNAL_ATTRIBUTE_OWNER btree_multimap
     : public container_internal::btree_multimap_container<
           container_internal::btree<container_internal::map_params<
               Key, Value, Compare, Alloc, /*TargetNodeSize=*/256,
diff --git a/absl/container/btree_set.h b/absl/container/btree_set.h
index 51dc42b..986d27d 100644
--- a/absl/container/btree_set.h
+++ b/absl/container/btree_set.h
@@ -48,10 +48,13 @@
 //
 // Another API difference is that btree iterators can be subtracted, and this
 // is faster than using std::distance.
+//
+// B-tree sets are not exception-safe.
 
 #ifndef ABSL_CONTAINER_BTREE_SET_H_
 #define ABSL_CONTAINER_BTREE_SET_H_
 
+#include "absl/base/attributes.h"
 #include "absl/container/internal/btree.h"  // IWYU pragma: export
 #include "absl/container/internal/btree_container.h"  // IWYU pragma: export
 
@@ -86,7 +89,7 @@
 //
 template <typename Key, typename Compare = std::less<Key>,
           typename Alloc = std::allocator<Key>>
-class btree_set
+class ABSL_INTERNAL_ATTRIBUTE_OWNER btree_set
     : public container_internal::btree_set_container<
           container_internal::btree<container_internal::set_params<
               Key, Compare, Alloc, /*TargetNodeSize=*/256,
@@ -442,7 +445,7 @@
 //
 template <typename Key, typename Compare = std::less<Key>,
           typename Alloc = std::allocator<Key>>
-class btree_multiset
+class ABSL_INTERNAL_ATTRIBUTE_OWNER btree_multiset
     : public container_internal::btree_multiset_container<
           container_internal::btree<container_internal::set_params<
               Key, Compare, Alloc, /*TargetNodeSize=*/256,
diff --git a/absl/container/flat_hash_map.h b/absl/container/flat_hash_map.h
index acd013b..ebd9ed6 100644
--- a/absl/container/flat_hash_map.h
+++ b/absl/container/flat_hash_map.h
@@ -26,21 +26,24 @@
 //
 // In most cases, your default choice for a hash map should be a map of type
 // `flat_hash_map`.
+//
+// `flat_hash_map` is not exception-safe.
 
 #ifndef ABSL_CONTAINER_FLAT_HASH_MAP_H_
 #define ABSL_CONTAINER_FLAT_HASH_MAP_H_
 
 #include <cstddef>
-#include <new>
+#include <memory>
 #include <type_traits>
 #include <utility>
 
 #include "absl/algorithm/container.h"
+#include "absl/base/attributes.h"
 #include "absl/base/macros.h"
+#include "absl/container/hash_container_defaults.h"
 #include "absl/container/internal/container_memory.h"
-#include "absl/container/internal/hash_function_defaults.h"  // IWYU pragma: export
 #include "absl/container/internal/raw_hash_map.h"  // IWYU pragma: export
-#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -62,7 +65,7 @@
 // * Requires values that are MoveConstructible
 // * Supports heterogeneous lookup, through `find()`, `operator[]()` and
 //   `insert()`, provided that the map is provided a compatible heterogeneous
-//   hashing function and equality operator.
+//   hashing function and equality operator. See below for details.
 // * Invalidates any references and pointers to elements within the table after
 //   `rehash()` and when the table is moved.
 // * Contains a `capacity()` member function indicating the number of element
@@ -80,6 +83,19 @@
 // libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may
 // be randomized across dynamically loaded libraries.
 //
+// To achieve heterogeneous lookup for custom types either `Hash` and `Eq` type
+// parameters can be used or `T` should have public inner types
+// `absl_container_hash` and (optionally) `absl_container_eq`. In either case,
+// `typename Hash::is_transparent` and `typename Eq::is_transparent` should be
+// well-formed. Both types are basically functors:
+// * `Hash` should support `size_t operator()(U val) const` that returns a hash
+// for the given `val`.
+// * `Eq` should support `bool operator()(U lhs, V rhs) const` that returns true
+// if `lhs` is equal to `rhs`.
+//
+// In most cases, `T` only needs to provide the `absl_container_hash`. In this
+// case `std::equal_to<void>` will be used in place of the `Eq` part.
+//
 // NOTE: A `flat_hash_map` stores its value types directly inside its
 // implementation array to avoid memory indirection. Because a `flat_hash_map`
 // is designed to move data when rehashed, map values will not retain pointer
@@ -106,13 +122,13 @@
 //  if (result != ducks.end()) {
 //    std::cout << "Result: " << result->second << std::endl;
 //  }
-template <class K, class V,
-          class Hash = absl::container_internal::hash_default_hash<K>,
-          class Eq = absl::container_internal::hash_default_eq<K>,
+template <class K, class V, class Hash = DefaultHashContainerHash<K>,
+          class Eq = DefaultHashContainerEq<K>,
           class Allocator = std::allocator<std::pair<const K, V>>>
-class flat_hash_map : public absl::container_internal::raw_hash_map<
-                          absl::container_internal::FlatHashMapPolicy<K, V>,
-                          Hash, Eq, Allocator> {
+class ABSL_INTERNAL_ATTRIBUTE_OWNER flat_hash_map
+    : public absl::container_internal::raw_hash_map<
+          absl::container_internal::FlatHashMapPolicy<K, V>, Hash, Eq,
+          Allocator> {
   using Base = typename flat_hash_map::raw_hash_map;
 
  public:
@@ -560,6 +576,38 @@
 
 namespace container_internal {
 
+// c_for_each_fast(flat_hash_map<>, Function)
+//
+// Container-based version of the <algorithm> `std::for_each()` function to
+// apply a function to a container's elements.
+// There are no guarantees on the order of the function calls.
+// Erasure and/or insertion of elements in the function is not allowed.
+template <typename K, typename V, typename H, typename E, typename A,
+          typename Function>
+decay_t<Function> c_for_each_fast(const flat_hash_map<K, V, H, E, A>& c,
+                                  Function&& f) {
+  container_internal::ForEach(f, &c);
+  return f;
+}
+template <typename K, typename V, typename H, typename E, typename A,
+          typename Function>
+decay_t<Function> c_for_each_fast(flat_hash_map<K, V, H, E, A>& c,
+                                  Function&& f) {
+  container_internal::ForEach(f, &c);
+  return f;
+}
+template <typename K, typename V, typename H, typename E, typename A,
+          typename Function>
+decay_t<Function> c_for_each_fast(flat_hash_map<K, V, H, E, A>&& c,
+                                  Function&& f) {
+  container_internal::ForEach(f, &c);
+  return f;
+}
+
+}  // namespace container_internal
+
+namespace container_internal {
+
 template <class K, class V>
 struct FlatHashMapPolicy {
   using slot_policy = container_internal::map_slot_policy<K, V>;
@@ -573,9 +621,10 @@
     slot_policy::construct(alloc, slot, std::forward<Args>(args)...);
   }
 
+  // Returns std::true_type when destruction of the slot is trivial.
   template <class Allocator>
-  static void destroy(Allocator* alloc, slot_type* slot) {
-    slot_policy::destroy(alloc, slot);
+  static auto destroy(Allocator* alloc, slot_type* slot) {
+    return slot_policy::destroy(alloc, slot);
   }
 
   template <class Allocator>
@@ -592,6 +641,13 @@
                                                    std::forward<Args>(args)...);
   }
 
+  template <class Hash>
+  static constexpr HashSlotFn get_hash_slot_fn() {
+    return memory_internal::IsLayoutCompatible<K, V>::value
+               ? &TypeErasedApplyToSlotFn<Hash, K>
+               : nullptr;
+  }
+
   static size_t space_used(const slot_type*) { return 0; }
 
   static std::pair<const K, V>& element(slot_type* slot) { return slot->value; }
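To illustrate the heterogeneous-lookup hookup described in the comments above, a hedged sketch with a hypothetical `UserId` key type; the nested `absl_container_hash`/`absl_container_eq` aliases are the mechanism the patch documents:

#include <cstddef>
#include <string>

#include "absl/container/flat_hash_map.h"
#include "absl/hash/hash.h"
#include "absl/strings/string_view.h"

struct UserId {
  std::string name;

  struct Hasher {
    using is_transparent = void;
    size_t operator()(absl::string_view v) const { return absl::HashOf(v); }
    size_t operator()(const UserId& u) const { return absl::HashOf(u.name); }
  };
  struct Eq {
    using is_transparent = void;
    // Both argument orders are provided to be safe.
    bool operator()(const UserId& a, const UserId& b) const {
      return a.name == b.name;
    }
    bool operator()(const UserId& a, absl::string_view b) const {
      return a.name == b;
    }
    bool operator()(absl::string_view a, const UserId& b) const {
      return a == b.name;
    }
  };
  using absl_container_hash = Hasher;
  using absl_container_eq = Eq;
};

// With the aliases above, the default Hash/Eq pick up UserId's functors, so
// lookups by absl::string_view avoid constructing a temporary UserId.
void Example() {
  absl::flat_hash_map<UserId, int> logins;
  logins[UserId{"ada"}] = 1;
  auto it = logins.find(absl::string_view("ada"));
  if (it != logins.end()) ++it->second;
}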
diff --git a/absl/container/flat_hash_map_test.cc b/absl/container/flat_hash_map_test.cc
index d90fe9d..08915e2 100644
--- a/absl/container/flat_hash_map_test.cc
+++ b/absl/container/flat_hash_map_test.cc
@@ -16,12 +16,17 @@
 
 #include <cstddef>
 #include <memory>
+#include <string>
 #include <type_traits>
 #include <utility>
 #include <vector>
 
+#include "gmock/gmock.h"
 #include "gtest/gtest.h"
+#include "absl/base/config.h"
 #include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/hash_policy_testing.h"
+#include "absl/container/internal/test_allocator.h"
 #include "absl/container/internal/unordered_map_constructor_test.h"
 #include "absl/container/internal/unordered_map_lookup_test.h"
 #include "absl/container/internal/unordered_map_members_test.h"
@@ -40,6 +45,7 @@
 using ::testing::IsEmpty;
 using ::testing::Pair;
 using ::testing::UnorderedElementsAre;
+using ::testing::UnorderedElementsAreArray;
 
 // Check that absl::flat_hash_map works in a global constructor.
 struct BeforeMain {
@@ -302,6 +308,58 @@
   }
 }
 
+TEST(FlatHashMap, CForEach) {
+  flat_hash_map<int, int> m;
+  std::vector<std::pair<int, int>> expected;
+  for (int i = 0; i < 100; ++i) {
+    {
+      SCOPED_TRACE("mutable object iteration");
+      std::vector<std::pair<int, int>> v;
+      absl::container_internal::c_for_each_fast(
+          m, [&v](std::pair<const int, int>& p) { v.push_back(p); });
+      EXPECT_THAT(v, UnorderedElementsAreArray(expected));
+    }
+    {
+      SCOPED_TRACE("const object iteration");
+      std::vector<std::pair<int, int>> v;
+      const flat_hash_map<int, int>& cm = m;
+      absl::container_internal::c_for_each_fast(
+          cm, [&v](const std::pair<const int, int>& p) { v.push_back(p); });
+      EXPECT_THAT(v, UnorderedElementsAreArray(expected));
+    }
+    {
+      SCOPED_TRACE("const object iteration");
+      std::vector<std::pair<int, int>> v;
+      absl::container_internal::c_for_each_fast(
+          flat_hash_map<int, int>(m),
+          [&v](std::pair<const int, int>& p) { v.push_back(p); });
+      EXPECT_THAT(v, UnorderedElementsAreArray(expected));
+    }
+    m[i] = i;
+    expected.emplace_back(i, i);
+  }
+}
+
+TEST(FlatHashMap, CForEachMutate) {
+  flat_hash_map<int, int> s;
+  std::vector<std::pair<int, int>> expected;
+  for (int i = 0; i < 100; ++i) {
+    std::vector<std::pair<int, int>> v;
+    absl::container_internal::c_for_each_fast(
+        s, [&v](std::pair<const int, int>& p) {
+          v.push_back(p);
+          p.second++;
+        });
+    EXPECT_THAT(v, UnorderedElementsAreArray(expected));
+    for (auto& p : expected) {
+      p.second++;
+    }
+    EXPECT_THAT(s, UnorderedElementsAreArray(expected));
+    s[i] = i;
+    expected.emplace_back(i, i);
+  }
+}
+
 // This test requires std::launder for mutable key access in node handles.
 #if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
 TEST(FlatHashMap, NodeHandleMutableKeyAccess) {
@@ -351,6 +409,49 @@
   t.m[0] = RecursiveType{};
 }
 
+TEST(FlatHashMap, FlatHashMapPolicyDestroyReturnsTrue) {
+  EXPECT_TRUE(
+      (decltype(FlatHashMapPolicy<int, char>::destroy<std::allocator<char>>(
+          nullptr, nullptr))()));
+  EXPECT_FALSE(
+      (decltype(FlatHashMapPolicy<int, char>::destroy<CountingAllocator<char>>(
+          nullptr, nullptr))()));
+  EXPECT_FALSE((decltype(FlatHashMapPolicy<int, std::unique_ptr<int>>::destroy<
+                         std::allocator<char>>(nullptr, nullptr))()));
+}
+
+struct InconsistentHashEqType {
+  InconsistentHashEqType(int v1, int v2) : v1(v1), v2(v2) {}
+  template <typename H>
+  friend H AbslHashValue(H h, InconsistentHashEqType t) {
+    return H::combine(std::move(h), t.v1);
+  }
+  bool operator==(InconsistentHashEqType t) const { return v2 == t.v2; }
+  int v1, v2;
+};
+
+TEST(Iterator, InconsistentHashEqFunctorsValidation) {
+  if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
+
+  absl::flat_hash_map<InconsistentHashEqType, int> m;
+  for (int i = 0; i < 10; ++i) m[{i, i}] = 1;
+  // We need to insert multiple times to guarantee that we get the assertion
+  // because it's possible for the hash to collide with the inserted element
+  // that has v2==0. In those cases, the new element won't be inserted.
+  auto insert_conflicting_elems = [&] {
+    for (int i = 100; i < 20000; ++i) {
+      EXPECT_EQ((m[{i, 0}]), 1);
+    }
+  };
+
+  const char* crash_message = "hash/eq functors are inconsistent.";
+#if defined(__arm__) || defined(__aarch64__)
+  // On ARM, the crash message is garbled so don't expect a specific message.
+  crash_message = "";
+#endif
+  EXPECT_DEATH_IF_SUPPORTED(insert_conflicting_elems(), crash_message);
+}
+
 }  // namespace
 }  // namespace container_internal
 ABSL_NAMESPACE_END
diff --git a/absl/container/flat_hash_set.h b/absl/container/flat_hash_set.h
index a94a82a..a3e36e0 100644
--- a/absl/container/flat_hash_set.h
+++ b/absl/container/flat_hash_set.h
@@ -26,18 +26,25 @@
 //
 // In most cases, your default choice for a hash set should be a set of type
 // `flat_hash_set`.
+//
+// `flat_hash_set` is not exception-safe.
+
 #ifndef ABSL_CONTAINER_FLAT_HASH_SET_H_
 #define ABSL_CONTAINER_FLAT_HASH_SET_H_
 
+#include <cstddef>
+#include <memory>
 #include <type_traits>
 #include <utility>
 
 #include "absl/algorithm/container.h"
+#include "absl/base/attributes.h"
 #include "absl/base/macros.h"
+#include "absl/container/hash_container_defaults.h"
 #include "absl/container/internal/container_memory.h"
-#include "absl/container/internal/hash_function_defaults.h"  // IWYU pragma: export
 #include "absl/container/internal/raw_hash_set.h"  // IWYU pragma: export
 #include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -58,7 +65,7 @@
 // * Requires keys that are CopyConstructible
 // * Supports heterogeneous lookup, through `find()` and `insert()`, provided
 //   that the set is provided a compatible heterogeneous hashing function and
-//   equality operator.
+//   equality operator. See below for details.
 // * Invalidates any references and pointers to elements within the table after
 //   `rehash()` and when the table is moved.
 // * Contains a `capacity()` member function indicating the number of element
@@ -76,6 +83,19 @@
 // libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may
 // be randomized across dynamically loaded libraries.
 //
+// To achieve heterogeneous lookup for custom types either `Hash` and `Eq` type
+// parameters can be used or `T` should have public inner types
+// `absl_container_hash` and (optionally) `absl_container_eq`. In either case,
+// `typename Hash::is_transparent` and `typename Eq::is_transparent` should be
+// well-formed. Both types are basically functors:
+// * `Hash` should support `size_t operator()(U val) const` that returns a hash
+// for the given `val`.
+// * `Eq` should support `bool operator()(U lhs, V rhs) const` that returns true
+// if `lhs` is equal to `rhs`.
+//
+// In most cases, `T` only needs to provide the `absl_container_hash`. In this
+// case `std::equal_to<void>` will be used in place of the `Eq` part.
+//
 // NOTE: A `flat_hash_set` stores its keys directly inside its implementation
 // array to avoid memory indirection. Because a `flat_hash_set` is designed to
 // move data when rehashed, set keys will not retain pointer stability. If you
@@ -99,10 +119,10 @@
 //  if (ducks.contains("dewey")) {
 //    std::cout << "We found dewey!" << std::endl;
 //  }
-template <class T, class Hash = absl::container_internal::hash_default_hash<T>,
-          class Eq = absl::container_internal::hash_default_eq<T>,
+template <class T, class Hash = DefaultHashContainerHash<T>,
+          class Eq = DefaultHashContainerEq<T>,
           class Allocator = std::allocator<T>>
-class flat_hash_set
+class ABSL_INTERNAL_ATTRIBUTE_OWNER flat_hash_set
     : public absl::container_internal::raw_hash_set<
           absl::container_internal::FlatHashSetPolicy<T>, Hash, Eq, Allocator> {
   using Base = typename flat_hash_set::raw_hash_set;
@@ -460,6 +480,33 @@
 
 namespace container_internal {
 
+// c_for_each_fast(flat_hash_set<>, Function)
+//
+// Container-based version of the <algorithm> `std::for_each()` function to
+// apply a function to a container's elements.
+// There are no guarantees on the order of the function calls.
+// Erasure and/or insertion of elements in the function is not allowed.
+template <typename T, typename H, typename E, typename A, typename Function>
+decay_t<Function> c_for_each_fast(const flat_hash_set<T, H, E, A>& c,
+                                  Function&& f) {
+  container_internal::ForEach(f, &c);
+  return f;
+}
+template <typename T, typename H, typename E, typename A, typename Function>
+decay_t<Function> c_for_each_fast(flat_hash_set<T, H, E, A>& c, Function&& f) {
+  container_internal::ForEach(f, &c);
+  return f;
+}
+template <typename T, typename H, typename E, typename A, typename Function>
+decay_t<Function> c_for_each_fast(flat_hash_set<T, H, E, A>&& c, Function&& f) {
+  container_internal::ForEach(f, &c);
+  return f;
+}
+
+}  // namespace container_internal
+
+namespace container_internal {
+
 template <class T>
 struct FlatHashSetPolicy {
   using slot_type = T;
@@ -473,9 +520,11 @@
                                                  std::forward<Args>(args)...);
   }
 
+  // Returns std::true_type when destruction of the slot is trivial.
   template <class Allocator>
-  static void destroy(Allocator* alloc, slot_type* slot) {
+  static auto destroy(Allocator* alloc, slot_type* slot) {
     absl::allocator_traits<Allocator>::destroy(*alloc, slot);
+    return IsDestructionTrivial<Allocator, slot_type>();
   }
 
   static T& element(slot_type* slot) { return *slot; }
@@ -489,6 +538,11 @@
   }
 
   static size_t space_used(const T*) { return 0; }
+
+  template <class Hash>
+  static constexpr HashSlotFn get_hash_slot_fn() {
+    return &TypeErasedApplyToSlotFn<Hash, T>;
+  }
 };
 }  // namespace container_internal
 
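A brief usage sketch for the new `c_for_each_fast` overloads (an Abseil-internal API); `Sum` is an illustrative helper, not part of the patch:

#include "absl/container/flat_hash_set.h"

// Visits every element in unspecified order; the callback must not insert
// into or erase from the set.
long Sum(const absl::flat_hash_set<int>& s) {
  long total = 0;
  absl::container_internal::c_for_each_fast(s,
                                            [&total](int v) { total += v; });
  return total;
}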
diff --git a/absl/container/flat_hash_set_test.cc b/absl/container/flat_hash_set_test.cc
index a60b4bf..0dd4326 100644
--- a/absl/container/flat_hash_set_test.cc
+++ b/absl/container/flat_hash_set_test.cc
@@ -16,6 +16,7 @@
 
 #include <cstdint>
 #include <memory>
+#include <type_traits>
 #include <utility>
 #include <vector>
 
@@ -24,6 +25,7 @@
 #include "absl/base/config.h"
 #include "absl/container/internal/container_memory.h"
 #include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/test_allocator.h"
 #include "absl/container/internal/unordered_set_constructor_test.h"
 #include "absl/container/internal/unordered_set_lookup_test.h"
 #include "absl/container/internal/unordered_set_members_test.h"
@@ -179,15 +181,46 @@
   }
 }
 
-class PoisonInline {
+TEST(FlatHashSet, CForEach) {
+  using ValueType = std::pair<int, int>;
+  flat_hash_set<ValueType> s;
+  std::vector<ValueType> expected;
+  for (int i = 0; i < 100; ++i) {
+    {
+      SCOPED_TRACE("mutable object iteration");
+      std::vector<ValueType> v;
+      absl::container_internal::c_for_each_fast(
+          s, [&v](const ValueType& p) { v.push_back(p); });
+      ASSERT_THAT(v, UnorderedElementsAreArray(expected));
+    }
+    {
+      SCOPED_TRACE("const object iteration");
+      std::vector<ValueType> v;
+      const flat_hash_set<ValueType>& cs = s;
+      absl::container_internal::c_for_each_fast(
+          cs, [&v](const ValueType& p) { v.push_back(p); });
+      ASSERT_THAT(v, UnorderedElementsAreArray(expected));
+    }
+    {
+      SCOPED_TRACE("temporary object iteration");
+      std::vector<ValueType> v;
+      absl::container_internal::c_for_each_fast(
+          flat_hash_set<ValueType>(s),
+          [&v](const ValueType& p) { v.push_back(p); });
+      ASSERT_THAT(v, UnorderedElementsAreArray(expected));
+    }
+    s.emplace(i, i);
+    expected.emplace_back(i, i);
+  }
+}
+
+class PoisonSoo {
   int64_t data_;
 
  public:
-  explicit PoisonInline(int64_t d) : data_(d) {
-    SanitizerPoisonObject(&data_);
-  }
-  PoisonInline(const PoisonInline& that) : PoisonInline(*that) {}
-  ~PoisonInline() { SanitizerUnpoisonObject(&data_); }
+  explicit PoisonSoo(int64_t d) : data_(d) { SanitizerPoisonObject(&data_); }
+  PoisonSoo(const PoisonSoo& that) : PoisonSoo(*that) {}
+  ~PoisonSoo() { SanitizerUnpoisonObject(&data_); }
 
   int64_t operator*() const {
     SanitizerUnpoisonObject(&data_);
@@ -196,45 +229,66 @@
     return ret;
   }
   template <typename H>
-  friend H AbslHashValue(H h, const PoisonInline& pi) {
+  friend H AbslHashValue(H h, const PoisonSoo& pi) {
     return H::combine(std::move(h), *pi);
   }
-  bool operator==(const PoisonInline& rhs) const { return **this == *rhs; }
+  bool operator==(const PoisonSoo& rhs) const { return **this == *rhs; }
 };
 
-// Tests that we don't touch the poison_ member of PoisonInline.
-TEST(FlatHashSet, PoisonInline) {
-  PoisonInline a(0), b(1);
-  {  // basic usage
-    flat_hash_set<PoisonInline> set;
-    set.insert(a);
-    EXPECT_THAT(set, UnorderedElementsAre(a));
-    set.insert(b);
-    EXPECT_THAT(set, UnorderedElementsAre(a, b));
-    set.erase(a);
-    EXPECT_THAT(set, UnorderedElementsAre(b));
-    set.rehash(0);  // shrink to inline
-    EXPECT_THAT(set, UnorderedElementsAre(b));
-  }
-  {  // test move constructor from inline to inline
-    flat_hash_set<PoisonInline> set;
-    set.insert(a);
-    flat_hash_set<PoisonInline> set2(std::move(set));
-    EXPECT_THAT(set2, UnorderedElementsAre(a));
-  }
-  {  // test move assignment from inline to inline
-    flat_hash_set<PoisonInline> set, set2;
-    set.insert(a);
-    set2 = std::move(set);
-    EXPECT_THAT(set2, UnorderedElementsAre(a));
-  }
-  {  // test alloc move constructor from inline to inline
-    flat_hash_set<PoisonInline> set;
-    set.insert(a);
-    flat_hash_set<PoisonInline> set2(std::move(set),
-                                     std::allocator<PoisonInline>());
-    EXPECT_THAT(set2, UnorderedElementsAre(a));
-  }
+TEST(FlatHashSet, PoisonSooBasic) {
+  PoisonSoo a(0), b(1);
+  flat_hash_set<PoisonSoo> set;
+  set.insert(a);
+  EXPECT_THAT(set, UnorderedElementsAre(a));
+  set.insert(b);
+  EXPECT_THAT(set, UnorderedElementsAre(a, b));
+  set.erase(a);
+  EXPECT_THAT(set, UnorderedElementsAre(b));
+  set.rehash(0);  // Shrink to SOO.
+  EXPECT_THAT(set, UnorderedElementsAre(b));
+}
+
+TEST(FlatHashSet, PoisonSooMoveConstructSooToSoo) {
+  PoisonSoo a(0);
+  flat_hash_set<PoisonSoo> set;
+  set.insert(a);
+  flat_hash_set<PoisonSoo> set2(std::move(set));
+  EXPECT_THAT(set2, UnorderedElementsAre(a));
+}
+
+TEST(FlatHashSet, PoisonSooAllocMoveConstructSooToSoo) {
+  PoisonSoo a(0);
+  flat_hash_set<PoisonSoo> set;
+  set.insert(a);
+  flat_hash_set<PoisonSoo> set2(std::move(set), std::allocator<PoisonSoo>());
+  EXPECT_THAT(set2, UnorderedElementsAre(a));
+}
+
+TEST(FlatHashSet, PoisonSooMoveAssignFullSooToEmptySoo) {
+  PoisonSoo a(0);
+  flat_hash_set<PoisonSoo> set, set2;
+  set.insert(a);
+  set2 = std::move(set);
+  EXPECT_THAT(set2, UnorderedElementsAre(a));
+}
+
+TEST(FlatHashSet, PoisonSooMoveAssignFullSooToFullSoo) {
+  PoisonSoo a(0), b(1);
+  flat_hash_set<PoisonSoo> set, set2;
+  set.insert(a);
+  set2.insert(b);
+  set2 = std::move(set);
+  EXPECT_THAT(set2, UnorderedElementsAre(a));
+}
+
+TEST(FlatHashSet, FlatHashSetPolicyDestroyReturnsTrue) {
+  EXPECT_TRUE((decltype(FlatHashSetPolicy<int>::destroy<std::allocator<int>>(
+      nullptr, nullptr))()));
+  EXPECT_FALSE(
+      (decltype(FlatHashSetPolicy<int>::destroy<CountingAllocator<int>>(
+          nullptr, nullptr))()));
+  EXPECT_FALSE((decltype(FlatHashSetPolicy<std::unique_ptr<int>>::destroy<
+                         std::allocator<int>>(nullptr, nullptr))()));
 }
 
 }  // namespace
diff --git a/absl/container/hash_container_defaults.h b/absl/container/hash_container_defaults.h
new file mode 100644
index 0000000..eb944a7
--- /dev/null
+++ b/absl/container/hash_container_defaults.h
@@ -0,0 +1,45 @@
+// Copyright 2024 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_HASH_CONTAINER_DEFAULTS_H_
+#define ABSL_CONTAINER_HASH_CONTAINER_DEFAULTS_H_
+
+#include "absl/base/config.h"
+#include "absl/container/internal/hash_function_defaults.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// DefaultHashContainerHash is a convenience alias for the functor that is used
+// by default by Abseil hash-based (unordered) containers for hashing when
+// `Hash` type argument is not explicitly specified.
+//
+// This type alias can be used by generic code that wants to provide more
+// flexibility for defining underlying containers.
+template <typename T>
+using DefaultHashContainerHash = absl::container_internal::hash_default_hash<T>;
+
+// DefaultHashContainerEq is a convenience alias for the functor that is used by
+// default by Abseil hash-based (unordered) containers for equality check when
+// `Eq` type argument is not explicitly specified.
+//
+// This type alias can be used by generic code that wants to provide more
+// flexibility for defining underlying containers.
+template <typename T>
+using DefaultHashContainerEq = absl::container_internal::hash_default_eq<T>;
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_HASH_CONTAINER_DEFAULTS_H_
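A minimal sketch of the intended use in generic code; the `MyHashMap` alias is hypothetical:

  #include "absl/container/flat_hash_map.h"
  #include "absl/container/hash_container_defaults.h"

  // Generic map alias that lets callers override Hash/Eq while defaulting to
  // the same functors the Abseil containers would pick on their own.
  template <typename K, typename V,
            typename Hash = absl::DefaultHashContainerHash<K>,
            typename Eq = absl::DefaultHashContainerEq<K>>
  using MyHashMap = absl::flat_hash_map<K, V, Hash, Eq>;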
diff --git a/absl/container/inlined_vector.h b/absl/container/inlined_vector.h
index 04e2c38..974b652 100644
--- a/absl/container/inlined_vector.h
+++ b/absl/container/inlined_vector.h
@@ -775,7 +775,20 @@
     ABSL_HARDENING_ASSERT(pos >= begin());
     ABSL_HARDENING_ASSERT(pos < end());
 
+    // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102329#c2
+    // It appears that GCC thinks that since `pos` is a const pointer and may
+    // point to uninitialized memory at this point, a warning should be
+    // issued. But `pos` is actually only used to compute an array index to
+    // write to.
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#pragma GCC diagnostic ignored "-Wuninitialized"
+#endif
     return storage_.Erase(pos, pos + 1);
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
   }
 
   // Overload of `InlinedVector::erase(...)` that erases every element in the
diff --git a/absl/container/inlined_vector_test.cc b/absl/container/inlined_vector_test.cc
index 241389a..6954262 100644
--- a/absl/container/inlined_vector_test.cc
+++ b/absl/container/inlined_vector_test.cc
@@ -304,6 +304,86 @@
   }
 }
 
+// Swapping containers of unique pointers should work fine, with no
+// leaks, despite the fact that unique pointers are trivially relocatable but
+// not trivially destructible.
+// TODO(absl-team): Using unique_ptr here is technically correct, but
+// a trivially relocatable struct would be less semantically confusing.
+TEST(UniquePtr, Swap) {
+  for (size_t size1 = 0; size1 < 5; ++size1) {
+    for (size_t size2 = 0; size2 < 5; ++size2) {
+      absl::InlinedVector<std::unique_ptr<size_t>, 2> a;
+      absl::InlinedVector<std::unique_ptr<size_t>, 2> b;
+      for (size_t i = 0; i < size1; ++i) {
+        a.push_back(std::make_unique<size_t>(i + 10));
+      }
+      for (size_t i = 0; i < size2; ++i) {
+        b.push_back(std::make_unique<size_t>(i + 20));
+      }
+      a.swap(b);
+      ASSERT_THAT(a, SizeIs(size2));
+      ASSERT_THAT(b, SizeIs(size1));
+      for (size_t i = 0; i < a.size(); ++i) {
+        ASSERT_THAT(a[i], Pointee(i + 20));
+      }
+      for (size_t i = 0; i < b.size(); ++i) {
+        ASSERT_THAT(b[i], Pointee(i + 10));
+      }
+    }
+  }
+}
+
+// Erasing from a container of unique pointers should work fine, with no
+// leaks, despite the fact that unique pointers are trivially relocatable but
+// not trivially destructible.
+// TODO(absl-team): Using unique_ptr here is technically correct, but
+// a trivially relocatable struct would be less semantically confusing.
+TEST(UniquePtr, EraseSingle) {
+  for (size_t size = 4; size < 16; ++size) {
+    absl::InlinedVector<std::unique_ptr<size_t>, 8> a;
+    for (size_t i = 0; i < size; ++i) {
+      a.push_back(std::make_unique<size_t>(i));
+    }
+    a.erase(a.begin());
+    ASSERT_THAT(a, SizeIs(size - 1));
+    for (size_t i = 0; i < size - 1; ++i) {
+      ASSERT_THAT(a[i], Pointee(i + 1));
+    }
+    a.erase(a.begin() + 2);
+    ASSERT_THAT(a, SizeIs(size - 2));
+    ASSERT_THAT(a[0], Pointee(1));
+    ASSERT_THAT(a[1], Pointee(2));
+    for (size_t i = 2; i < size - 2; ++i) {
+      ASSERT_THAT(a[i], Pointee(i + 2));
+    }
+  }
+}
+
+// Erasing from a container of unique pointers should work fine, with no
+// leaks, despite the fact that unique pointers are trivially relocatable but
+// not trivially destructible.
+// TODO(absl-team): Using unique_ptr here is technically correct, but
+// a trivially relocatable struct would be less semantically confusing.
+TEST(UniquePtr, EraseMulti) {
+  for (size_t size = 5; size < 16; ++size) {
+    absl::InlinedVector<std::unique_ptr<size_t>, 8> a;
+    for (size_t i = 0; i < size; ++i) {
+      a.push_back(std::make_unique<size_t>(i));
+    }
+    a.erase(a.begin(), a.begin() + 2);
+    ASSERT_THAT(a, SizeIs(size - 2));
+    for (size_t i = 0; i < size - 2; ++i) {
+      ASSERT_THAT(a[i], Pointee(i + 2));
+    }
+    a.erase(a.begin() + 1, a.begin() + 3);
+    ASSERT_THAT(a, SizeIs(size - 4));
+    ASSERT_THAT(a[0], Pointee(2));
+    for (size_t i = 1; i < size - 4; ++i) {
+      ASSERT_THAT(a[i], Pointee(i + 4));
+    }
+  }
+}
+
 // At the end of this test loop, the elements between [erase_begin, erase_end)
 // should have reference counts == 0, and all others elements should have
 // reference counts == 1.
@@ -783,7 +863,9 @@
   // The union should be absorbing some of the allocation bookkeeping overhead
   // in the larger vectors, leaving only the size_ field as overhead.
 
-  struct T { void* val; };
+  struct T {
+    void* val;
+  };
   size_t expected_overhead = sizeof(T);
 
   EXPECT_EQ((2 * expected_overhead),
diff --git a/absl/container/internal/btree.h b/absl/container/internal/btree.h
index 91df57a..689e71a 100644
--- a/absl/container/internal/btree.h
+++ b/absl/container/internal/btree.h
@@ -53,11 +53,11 @@
 #include <functional>
 #include <iterator>
 #include <limits>
-#include <new>
 #include <string>
 #include <type_traits>
 #include <utility>
 
+#include "absl/base/config.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/base/macros.h"
 #include "absl/container/internal/common.h"
@@ -70,7 +70,6 @@
 #include "absl/strings/cord.h"
 #include "absl/strings/string_view.h"
 #include "absl/types/compare.h"
-#include "absl/utility/utility.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -78,9 +77,10 @@
 
 #ifdef ABSL_BTREE_ENABLE_GENERATIONS
 #error ABSL_BTREE_ENABLE_GENERATIONS cannot be directly set
-#elif defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
-    defined(ABSL_HAVE_HWADDRESS_SANITIZER) || \
-    defined(ABSL_HAVE_MEMORY_SANITIZER)
+#elif (defined(ABSL_HAVE_ADDRESS_SANITIZER) ||   \
+       defined(ABSL_HAVE_HWADDRESS_SANITIZER) || \
+       defined(ABSL_HAVE_MEMORY_SANITIZER)) &&   \
+    !defined(NDEBUG_SANITIZER)  // If defined, performance is important.
 // When compiled in sanitizer mode, we add generation integers to the nodes and
 // iterators. When iterators are used, we validate that the container has not
 // been mutated since the iterator was constructed.
@@ -475,7 +475,7 @@
 // useful information.
 template <typename V>
 struct SearchResult<V, false> {
-  SearchResult() {}
+  SearchResult() = default;
   explicit SearchResult(V v) : value(v) {}
   SearchResult(V v, MatchKind /*match*/) : value(v) {}
 
@@ -580,14 +580,12 @@
   using layout_type =
       absl::container_internal::Layout<btree_node *, uint32_t, field_type,
                                        slot_type, btree_node *>;
+  using leaf_layout_type = typename layout_type::template WithStaticSizes<
+      /*parent*/ 1,
+      /*generation*/ BtreeGenerationsEnabled() ? 1 : 0,
+      /*position, start, finish, max_count*/ 4>;
   constexpr static size_type SizeWithNSlots(size_type n) {
-    return layout_type(
-               /*parent*/ 1,
-               /*generation*/ BtreeGenerationsEnabled() ? 1 : 0,
-               /*position, start, finish, max_count*/ 4,
-               /*slots*/ n,
-               /*children*/ 0)
-        .AllocSize();
+    return leaf_layout_type(/*slots*/ n, /*children*/ 0).AllocSize();
   }
   // A lower bound for the overhead of fields other than slots in a leaf node.
   constexpr static size_type MinimumOverhead() {
@@ -619,27 +617,22 @@
   constexpr static size_type kNodeSlots =
       kNodeTargetSlots >= kMinNodeSlots ? kNodeTargetSlots : kMinNodeSlots;
 
+  using internal_layout_type = typename layout_type::template WithStaticSizes<
+      /*parent*/ 1,
+      /*generation*/ BtreeGenerationsEnabled() ? 1 : 0,
+      /*position, start, finish, max_count*/ 4, /*slots*/ kNodeSlots,
+      /*children*/ kNodeSlots + 1>;
+
   // The node is internal (i.e. is not a leaf node) if and only if `max_count`
   // has this value.
   constexpr static field_type kInternalNodeMaxCount = 0;
 
-  constexpr static layout_type Layout(const size_type slot_count,
-                                      const size_type child_count) {
-    return layout_type(
-        /*parent*/ 1,
-        /*generation*/ BtreeGenerationsEnabled() ? 1 : 0,
-        /*position, start, finish, max_count*/ 4,
-        /*slots*/ slot_count,
-        /*children*/ child_count);
-  }
   // Leaves can have less than kNodeSlots values.
-  constexpr static layout_type LeafLayout(
+  constexpr static leaf_layout_type LeafLayout(
       const size_type slot_count = kNodeSlots) {
-    return Layout(slot_count, 0);
+    return leaf_layout_type(slot_count, 0);
   }
-  constexpr static layout_type InternalLayout() {
-    return Layout(kNodeSlots, kNodeSlots + 1);
-  }
+  constexpr static auto InternalLayout() { return internal_layout_type(); }
   constexpr static size_type LeafSize(const size_type slot_count = kNodeSlots) {
     return LeafLayout(slot_count).AllocSize();
   }
@@ -1407,9 +1400,9 @@
     copy_or_move_values_in_order(other);
   }
   btree(btree &&other) noexcept
-      : root_(absl::exchange(other.root_, EmptyNode())),
+      : root_(std::exchange(other.root_, EmptyNode())),
         rightmost_(std::move(other.rightmost_)),
-        size_(absl::exchange(other.size_, 0u)) {
+        size_(std::exchange(other.size_, 0u)) {
     other.mutable_rightmost() = EmptyNode();
   }
   btree(btree &&other, const allocator_type &alloc)
diff --git a/absl/container/internal/common_policy_traits.h b/absl/container/internal/common_policy_traits.h
index 57eac67..c521f61 100644
--- a/absl/container/internal/common_policy_traits.h
+++ b/absl/container/internal/common_policy_traits.h
@@ -45,9 +45,10 @@
 
   // PRECONDITION: `slot` is INITIALIZED
   // POSTCONDITION: `slot` is UNINITIALIZED
+  // Returns std::true_type if destruction of the slot is trivial.
   template <class Alloc>
-  static void destroy(Alloc* alloc, slot_type* slot) {
-    Policy::destroy(alloc, slot);
+  static auto destroy(Alloc* alloc, slot_type* slot) {
+    return Policy::destroy(alloc, slot);
   }
 
   // Transfers the `old_slot` to `new_slot`. Any memory allocated by the
@@ -63,7 +64,7 @@
   //                UNINITIALIZED
   template <class Alloc>
   static void transfer(Alloc* alloc, slot_type* new_slot, slot_type* old_slot) {
-    transfer_impl(alloc, new_slot, old_slot, Rank0{});
+    transfer_impl(alloc, new_slot, old_slot, Rank2{});
   }
 
   // PRECONDITION: `slot` is INITIALIZED
@@ -82,23 +83,31 @@
 
   static constexpr bool transfer_uses_memcpy() {
     return std::is_same<decltype(transfer_impl<std::allocator<char>>(
-                            nullptr, nullptr, nullptr, Rank0{})),
+                            nullptr, nullptr, nullptr, Rank2{})),
+                        std::true_type>::value;
+  }
+
+  // Returns true if destroy is trivial and can be omitted.
+  template <class Alloc>
+  static constexpr bool destroy_is_trivial() {
+    return std::is_same<decltype(destroy<Alloc>(nullptr, nullptr)),
                         std::true_type>::value;
   }
 
  private:
-  // To rank the overloads below for overload resolution. Rank0 is preferred.
-  struct Rank2 {};
-  struct Rank1 : Rank2 {};
-  struct Rank0 : Rank1 {};
+  // Use go/ranked-overloads for dispatching.
+  struct Rank0 {};
+  struct Rank1 : Rank0 {};
+  struct Rank2 : Rank1 {};
 
   // Use auto -> decltype as an enabler.
   // P::transfer returns std::true_type if transfer uses memcpy (e.g. in
   // node_slot_policy).
   template <class Alloc, class P = Policy>
   static auto transfer_impl(Alloc* alloc, slot_type* new_slot,
-                            slot_type* old_slot, Rank0)
-      -> decltype(P::transfer(alloc, new_slot, old_slot)) {
+                            slot_type* old_slot,
+                            Rank2) -> decltype(P::transfer(alloc, new_slot,
+                                                           old_slot)) {
     return P::transfer(alloc, new_slot, old_slot);
   }
 #if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
@@ -121,7 +130,7 @@
 
   template <class Alloc>
   static void transfer_impl(Alloc* alloc, slot_type* new_slot,
-                            slot_type* old_slot, Rank2) {
+                            slot_type* old_slot, Rank0) {
     construct(alloc, new_slot, std::move(element(old_slot)));
     destroy(alloc, old_slot);
   }
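The Rank0/Rank1/Rank2 tags implement ranked-overload dispatch. A standalone sketch of the idiom, with all names invented for illustration: the call site passes the most derived tag, so the overload with the strictest SFINAE constraint wins when it is viable, and resolution otherwise falls back to a base tag.

  #include <iostream>

  struct Rank0 {};
  struct Rank1 : Rank0 {};
  struct Rank2 : Rank1 {};

  // Preferred overload: participates only when T has a Fast() member.
  template <typename T>
  auto Describe(const T& t, Rank2) -> decltype(t.Fast(), void()) {
    std::cout << "fast path\n";
  }

  // Fallback overload: always viable, reached via derived-to-base conversion.
  template <typename T>
  void Describe(const T&, Rank0) {
    std::cout << "generic path\n";
  }

  struct HasFast {
    void Fast() const {}
  };
  struct Plain {};

  int main() {
    Describe(HasFast{}, Rank2{});  // picks the Rank2 overload
    Describe(Plain{}, Rank2{});    // SFINAE removes Rank2; falls back to Rank0
  }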
diff --git a/absl/container/internal/common_policy_traits_test.cc b/absl/container/internal/common_policy_traits_test.cc
index faee3e7..8d8f8ba 100644
--- a/absl/container/internal/common_policy_traits_test.cc
+++ b/absl/container/internal/common_policy_traits_test.cc
@@ -39,44 +39,59 @@
   using key_type = Slot;
   using init_type = Slot;
 
-  static std::function<void(void*, Slot*, Slot)> construct;
-  static std::function<void(void*, Slot*)> destroy;
+  struct PolicyFunctions {
+    std::function<void(void*, Slot*, Slot)> construct;
+    std::function<void(void*, Slot*)> destroy;
+    std::function<Slot&(Slot*)> element;
+  };
 
-  static std::function<Slot&(Slot*)> element;
+  static PolicyFunctions* functions() {
+    static PolicyFunctions* functions = new PolicyFunctions();
+    return functions;
+  }
+
+  static void construct(void* a, Slot* b, Slot c) {
+    functions()->construct(a, b, c);
+  }
+  static void destroy(void* a, Slot* b) { functions()->destroy(a, b); }
+  static Slot& element(Slot* b) { return functions()->element(b); }
 };
 
-std::function<void(void*, Slot*, Slot)> PolicyWithoutOptionalOps::construct;
-std::function<void(void*, Slot*)> PolicyWithoutOptionalOps::destroy;
-
-std::function<Slot&(Slot*)> PolicyWithoutOptionalOps::element;
-
 struct PolicyWithOptionalOps : PolicyWithoutOptionalOps {
-  static std::function<void(void*, Slot*, Slot*)> transfer;
-};
-std::function<void(void*, Slot*, Slot*)> PolicyWithOptionalOps::transfer;
+  struct TransferFunctions {
+    std::function<void(void*, Slot*, Slot*)> transfer;
+  };
 
-struct PolicyWithMemcpyTransfer : PolicyWithoutOptionalOps {
-  static std::function<std::true_type(void*, Slot*, Slot*)> transfer;
+  static TransferFunctions* transfer_fn() {
+    static TransferFunctions* transfer_fn = new TransferFunctions();
+    return transfer_fn;
+  }
+  static void transfer(void* a, Slot* b, Slot* c) {
+    transfer_fn()->transfer(a, b, c);
+  }
 };
-std::function<std::true_type(void*, Slot*, Slot*)>
-    PolicyWithMemcpyTransfer::transfer;
+
+struct PolicyWithMemcpyTransferAndTrivialDestroy : PolicyWithoutOptionalOps {
+  static std::true_type transfer(void*, Slot*, Slot*) { return {}; }
+  static std::true_type destroy(void*, Slot*) { return {}; }
+};
 
 struct Test : ::testing::Test {
   Test() {
-    PolicyWithoutOptionalOps::construct = [&](void* a1, Slot* a2, Slot a3) {
+    PolicyWithoutOptionalOps::functions()->construct = [&](void* a1, Slot* a2,
+                                                           Slot a3) {
       construct.Call(a1, a2, std::move(a3));
     };
-    PolicyWithoutOptionalOps::destroy = [&](void* a1, Slot* a2) {
+    PolicyWithoutOptionalOps::functions()->destroy = [&](void* a1, Slot* a2) {
       destroy.Call(a1, a2);
     };
 
-    PolicyWithoutOptionalOps::element = [&](Slot* a1) -> Slot& {
+    PolicyWithoutOptionalOps::functions()->element = [&](Slot* a1) -> Slot& {
       return element.Call(a1);
     };
 
-    PolicyWithOptionalOps::transfer = [&](void* a1, Slot* a2, Slot* a3) {
-      return transfer.Call(a1, a2, a3);
-    };
+    PolicyWithOptionalOps::transfer_fn()->transfer =
+        [&](void* a1, Slot* a2, Slot* a3) { return transfer.Call(a1, a2, a3); };
   }
 
   std::allocator<Slot> alloc;
@@ -125,7 +140,15 @@
   EXPECT_FALSE(
       common_policy_traits<PolicyWithOptionalOps>::transfer_uses_memcpy());
   EXPECT_TRUE(
-      common_policy_traits<PolicyWithMemcpyTransfer>::transfer_uses_memcpy());
+      common_policy_traits<
+          PolicyWithMemcpyTransferAndTrivialDestroy>::transfer_uses_memcpy());
+}
+
+TEST(DestroyIsTrivial, Basic) {
+  EXPECT_FALSE(common_policy_traits<PolicyWithOptionalOps>::destroy_is_trivial<
+               std::allocator<char>>());
+  EXPECT_TRUE(common_policy_traits<PolicyWithMemcpyTransferAndTrivialDestroy>::
+                  destroy_is_trivial<std::allocator<char>>());
 }
 
 }  // namespace
diff --git a/absl/container/internal/compressed_tuple.h b/absl/container/internal/compressed_tuple.h
index 59e70eb..6db0468 100644
--- a/absl/container/internal/compressed_tuple.h
+++ b/absl/container/internal/compressed_tuple.h
@@ -87,11 +87,11 @@
   constexpr Storage() = default;
   template <typename V>
   explicit constexpr Storage(absl::in_place_t, V&& v)
-      : value(absl::forward<V>(v)) {}
+      : value(std::forward<V>(v)) {}
   constexpr const T& get() const& { return value; }
-  T& get() & { return value; }
-  constexpr const T&& get() const&& { return absl::move(*this).value; }
-  T&& get() && { return std::move(*this).value; }
+  constexpr T& get() & { return value; }
+  constexpr const T&& get() const&& { return std::move(*this).value; }
+  constexpr T&& get() && { return std::move(*this).value; }
 };
 
 template <typename T, size_t I>
@@ -99,13 +99,12 @@
   constexpr Storage() = default;
 
   template <typename V>
-  explicit constexpr Storage(absl::in_place_t, V&& v)
-      : T(absl::forward<V>(v)) {}
+  explicit constexpr Storage(absl::in_place_t, V&& v) : T(std::forward<V>(v)) {}
 
   constexpr const T& get() const& { return *this; }
-  T& get() & { return *this; }
-  constexpr const T&& get() const&& { return absl::move(*this); }
-  T&& get() && { return std::move(*this); }
+  constexpr T& get() & { return *this; }
+  constexpr const T&& get() const&& { return std::move(*this); }
+  constexpr T&& get() && { return std::move(*this); }
 };
 
 template <typename D, typename I, bool ShouldAnyUseBase>
@@ -123,7 +122,7 @@
   constexpr CompressedTupleImpl() = default;
   template <typename... Vs>
   explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args)
-      : Storage<Ts, I>(absl::in_place, absl::forward<Vs>(args))... {}
+      : Storage<Ts, I>(absl::in_place, std::forward<Vs>(args))... {}
   friend CompressedTuple<Ts...>;
 };
 
@@ -135,7 +134,7 @@
   constexpr CompressedTupleImpl() = default;
   template <typename... Vs>
   explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args)
-      : Storage<Ts, I, false>(absl::in_place, absl::forward<Vs>(args))... {}
+      : Storage<Ts, I, false>(absl::in_place, std::forward<Vs>(args))... {}
   friend CompressedTuple<Ts...>;
 };
 
@@ -234,11 +233,11 @@
                 bool> = true>
   explicit constexpr CompressedTuple(First&& first, Vs&&... base)
       : CompressedTuple::CompressedTupleImpl(absl::in_place,
-                                             absl::forward<First>(first),
-                                             absl::forward<Vs>(base)...) {}
+                                             std::forward<First>(first),
+                                             std::forward<Vs>(base)...) {}
 
   template <int I>
-  ElemT<I>& get() & {
+  constexpr ElemT<I>& get() & {
     return StorageT<I>::get();
   }
 
@@ -248,13 +247,13 @@
   }
 
   template <int I>
-  ElemT<I>&& get() && {
+  constexpr ElemT<I>&& get() && {
     return std::move(*this).StorageT<I>::get();
   }
 
   template <int I>
   constexpr const ElemT<I>&& get() const&& {
-    return absl::move(*this).StorageT<I>::get();
+    return std::move(*this).StorageT<I>::get();
   }
 };
 
diff --git a/absl/container/internal/compressed_tuple_test.cc b/absl/container/internal/compressed_tuple_test.cc
index 74111f9..c3edf54 100644
--- a/absl/container/internal/compressed_tuple_test.cc
+++ b/absl/container/internal/compressed_tuple_test.cc
@@ -15,7 +15,11 @@
 #include "absl/container/internal/compressed_tuple.h"
 
 #include <memory>
+#include <set>
 #include <string>
+#include <type_traits>
+#include <utility>
+#include <vector>
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
@@ -27,14 +31,22 @@
 
 // These are declared at global scope purely so that error messages
 // are smaller and easier to understand.
-enum class CallType { kConstRef, kConstMove };
+enum class CallType { kMutableRef, kConstRef, kMutableMove, kConstMove };
 
 template <int>
 struct Empty {
+  constexpr CallType value() & { return CallType::kMutableRef; }
   constexpr CallType value() const& { return CallType::kConstRef; }
+  constexpr CallType value() && { return CallType::kMutableMove; }
   constexpr CallType value() const&& { return CallType::kConstMove; }
 };
 
+// Unconditionally return an lvalue reference to `t`.
+template <typename T>
+constexpr T& AsLValue(T&& t) {
+  return t;
+}
+
 template <typename T>
 struct NotEmpty {
   T value;
@@ -54,6 +66,7 @@
 
 using absl::test_internal::CopyableMovableInstance;
 using absl::test_internal::InstanceTracker;
+using ::testing::Each;
 
 TEST(CompressedTupleTest, Sizeof) {
   EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int>));
@@ -70,6 +83,30 @@
             sizeof(CompressedTuple<int, Empty<0>, NotEmpty<double>, Empty<1>>));
 }
 
+TEST(CompressedTupleTest, PointerToEmpty) {
+  auto to_void_ptrs = [](const auto&... objs) {
+    return std::vector<const void*>{static_cast<const void*>(&objs)...};
+  };
+  {
+    using Tuple = CompressedTuple<int, Empty<0>>;
+    EXPECT_EQ(sizeof(int), sizeof(Tuple));
+    Tuple t;
+    EXPECT_THAT(to_void_ptrs(t.get<1>()), Each(&t));
+  }
+  {
+    using Tuple = CompressedTuple<int, Empty<0>, Empty<1>>;
+    EXPECT_EQ(sizeof(int), sizeof(Tuple));
+    Tuple t;
+    EXPECT_THAT(to_void_ptrs(t.get<1>(), t.get<2>()), Each(&t));
+  }
+  {
+    using Tuple = CompressedTuple<int, Empty<0>, Empty<1>, Empty<2>>;
+    EXPECT_EQ(sizeof(int), sizeof(Tuple));
+    Tuple t;
+    EXPECT_THAT(to_void_ptrs(t.get<1>(), t.get<2>(), t.get<3>()), Each(&t));
+  }
+}
+
 TEST(CompressedTupleTest, OneMoveOnRValueConstructionTemp) {
   InstanceTracker tracker;
   CompressedTuple<CopyableMovableInstance> x1(CopyableMovableInstance(1));
@@ -346,8 +383,24 @@
     constexpr int value() const { return v; }
     int v;
   };
-  constexpr CompressedTuple<int, double, CompressedTuple<int>, Empty<0>> x(
-      7, 1.25, CompressedTuple<int>(5), {});
+
+  using Tuple = CompressedTuple<int, double, CompressedTuple<int>, Empty<0>>;
+
+  constexpr int r0 =
+      AsLValue(Tuple(1, 0.75, CompressedTuple<int>(9), {})).get<0>();
+  constexpr double r1 =
+      AsLValue(Tuple(1, 0.75, CompressedTuple<int>(9), {})).get<1>();
+  constexpr int r2 =
+      AsLValue(Tuple(1, 0.75, CompressedTuple<int>(9), {})).get<2>().get<0>();
+  constexpr CallType r3 =
+      AsLValue(Tuple(1, 0.75, CompressedTuple<int>(9), {})).get<3>().value();
+
+  EXPECT_EQ(r0, 1);
+  EXPECT_EQ(r1, 0.75);
+  EXPECT_EQ(r2, 9);
+  EXPECT_EQ(r3, CallType::kMutableRef);
+
+  constexpr Tuple x(7, 1.25, CompressedTuple<int>(5), {});
   constexpr int x0 = x.get<0>();
   constexpr double x1 = x.get<1>();
   constexpr int x2 = x.get<2>().get<0>();
@@ -358,7 +411,18 @@
   EXPECT_EQ(x2, 5);
   EXPECT_EQ(x3, CallType::kConstRef);
 
-#if !defined(__GNUC__) || defined(__clang__) || __GNUC__ > 4
+  constexpr int m0 = Tuple(5, 0.25, CompressedTuple<int>(3), {}).get<0>();
+  constexpr double m1 = Tuple(5, 0.25, CompressedTuple<int>(3), {}).get<1>();
+  constexpr int m2 =
+      Tuple(5, 0.25, CompressedTuple<int>(3), {}).get<2>().get<0>();
+  constexpr CallType m3 =
+      Tuple(5, 0.25, CompressedTuple<int>(3), {}).get<3>().value();
+
+  EXPECT_EQ(m0, 5);
+  EXPECT_EQ(m1, 0.25);
+  EXPECT_EQ(m2, 3);
+  EXPECT_EQ(m3, CallType::kMutableMove);
+
   constexpr CompressedTuple<Empty<0>, TrivialStruct, int> trivial = {};
   constexpr CallType trivial0 = trivial.get<0>().value();
   constexpr int trivial1 = trivial.get<1>().value();
@@ -367,7 +431,6 @@
   EXPECT_EQ(trivial0, CallType::kConstRef);
   EXPECT_EQ(trivial1, 0);
   EXPECT_EQ(trivial2, 0);
-#endif
 
   constexpr CompressedTuple<Empty<0>, NonTrivialStruct, absl::optional<int>>
       non_trivial = {};
@@ -386,8 +449,8 @@
 
 #if defined(__clang__)
   // An apparent bug in earlier versions of gcc claims these are ambiguous.
-  constexpr int x2m = absl::move(x.get<2>()).get<0>();
-  constexpr CallType x3m = absl::move(x).get<3>().value();
+  constexpr int x2m = std::move(x.get<2>()).get<0>();
+  constexpr CallType x3m = std::move(x).get<3>().value();
   EXPECT_EQ(x2m, 5);
   EXPECT_EQ(x3m, CallType::kConstMove);
 #endif
diff --git a/absl/container/internal/container_memory.h b/absl/container/internal/container_memory.h
index 3262d4e..ba8e08a 100644
--- a/absl/container/internal/container_memory.h
+++ b/absl/container/internal/container_memory.h
@@ -68,6 +68,18 @@
   return p;
 }
 
+// Returns std::true_type if destruction of the value with the given Allocator
+// is trivial, and std::false_type otherwise.
+template <class Allocator, class ValueType>
+constexpr auto IsDestructionTrivial() {
+  constexpr bool result =
+      std::is_trivially_destructible<ValueType>::value &&
+      std::is_same<typename absl::allocator_traits<
+                       Allocator>::template rebind_alloc<char>,
+                   std::allocator<char>>::value;
+  return std::integral_constant<bool, result>();
+}
+
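A minimal sketch of the helper's contract, using only standard types; the assertions mirror what the map_slot_policy test elsewhere in this change checks:

  #include <memory>

  #include "absl/container/internal/container_memory.h"

  // std::true_type only when the value is trivially destructible and the
  // allocator is (a rebind of) std::allocator.
  static_assert(decltype(absl::container_internal::IsDestructionTrivial<
                         std::allocator<int>, int>())::value,
                "trivial destruction can be skipped");
  static_assert(!decltype(absl::container_internal::IsDestructionTrivial<
                          std::allocator<int>, std::unique_ptr<int>>())::value,
                "unique_ptr destruction must run");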
 // The pointer must have been previously obtained by calling
 // Allocate<Alignment>(alloc, n).
 template <size_t Alignment, class Alloc>
@@ -414,12 +426,13 @@
   }
 
   template <class Allocator>
-  static void destroy(Allocator* alloc, slot_type* slot) {
+  static auto destroy(Allocator* alloc, slot_type* slot) {
     if (kMutableKeys::value) {
       absl::allocator_traits<Allocator>::destroy(*alloc, &slot->mutable_value);
     } else {
       absl::allocator_traits<Allocator>::destroy(*alloc, &slot->value);
     }
+    return IsDestructionTrivial<Allocator, value_type>();
   }
 
   template <class Allocator>
@@ -451,6 +464,26 @@
   }
 };
 
+// Type-erased function for computing the hash of a slot.
+using HashSlotFn = size_t (*)(const void* hash_fn, void* slot);
+
+// Type-erased function that applies `Fn` to the data inside `slot`.
+// The data is expected to have type `T`.
+template <class Fn, class T>
+size_t TypeErasedApplyToSlotFn(const void* fn, void* slot) {
+  const auto* f = static_cast<const Fn*>(fn);
+  return (*f)(*static_cast<const T*>(slot));
+}
+
+// Type-erased function that applies `Fn` to the data pointed to by
+// `*slot_ptr`. The data is expected to have type `T`.
+template <class Fn, class T>
+size_t TypeErasedDerefAndApplyToSlotFn(const void* fn, void* slot_ptr) {
+  const auto* f = static_cast<const Fn*>(fn);
+  const T* slot = *static_cast<const T**>(slot_ptr);
+  return (*f)(*slot);
+}
+
 }  // namespace container_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/absl/container/internal/container_memory_test.cc b/absl/container/internal/container_memory_test.cc
index 90d64bf..7e4357d 100644
--- a/absl/container/internal/container_memory_test.cc
+++ b/absl/container/internal/container_memory_test.cc
@@ -280,6 +280,38 @@
   }
 }
 
+TEST(MapSlotPolicy, DestroyReturnsTrue) {
+  {
+    using slot_policy = map_slot_policy<int, float>;
+    EXPECT_TRUE(
+        (std::is_same<decltype(slot_policy::destroy<std::allocator<char>>(
+                          nullptr, nullptr)),
+                      std::true_type>::value));
+  }
+  {
+    EXPECT_FALSE(std::is_trivially_destructible<std::unique_ptr<int>>::value);
+    using slot_policy = map_slot_policy<int, std::unique_ptr<int>>;
+    EXPECT_TRUE(
+        (std::is_same<decltype(slot_policy::destroy<std::allocator<char>>(
+                          nullptr, nullptr)),
+                      std::false_type>::value));
+  }
+}
+
+TEST(ApplyTest, TypeErasedApplyToSlotFn) {
+  size_t x = 7;
+  auto fn = [](size_t v) { return v * 2; };
+  EXPECT_EQ((TypeErasedApplyToSlotFn<decltype(fn), size_t>(&fn, &x)), 14);
+}
+
+TEST(ApplyTest, TypeErasedDerefAndApplyToSlotFn) {
+  size_t x = 7;
+  auto fn = [](size_t v) { return v * 2; };
+  size_t* x_ptr = &x;
+  EXPECT_EQ(
+      (TypeErasedDerefAndApplyToSlotFn<decltype(fn), size_t>(&fn, &x_ptr)), 14);
+}
+
 }  // namespace
 }  // namespace container_internal
 ABSL_NAMESPACE_END
diff --git a/absl/container/internal/hash_function_defaults.h b/absl/container/internal/hash_function_defaults.h
index a3613b4..0f07bcf 100644
--- a/absl/container/internal/hash_function_defaults.h
+++ b/absl/container/internal/hash_function_defaults.h
@@ -45,14 +45,16 @@
 #ifndef ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
 #define ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
 
-#include <stdint.h>
 #include <cstddef>
+#include <functional>
 #include <memory>
 #include <string>
 #include <type_traits>
 
 #include "absl/base/config.h"
+#include "absl/container/internal/common.h"
 #include "absl/hash/hash.h"
+#include "absl/meta/type_traits.h"
 #include "absl/strings/cord.h"
 #include "absl/strings/string_view.h"
 
@@ -188,6 +190,71 @@
 template <class T>
 struct HashEq<std::shared_ptr<T>> : HashEq<T*> {};
 
+template <typename T, typename E = void>
+struct HasAbslContainerHash : std::false_type {};
+
+template <typename T>
+struct HasAbslContainerHash<T, absl::void_t<typename T::absl_container_hash>>
+    : std::true_type {};
+
+template <typename T, typename E = void>
+struct HasAbslContainerEq : std::false_type {};
+
+template <typename T>
+struct HasAbslContainerEq<T, absl::void_t<typename T::absl_container_eq>>
+    : std::true_type {};
+
+template <typename T, typename E = void>
+struct AbslContainerEq {
+  using type = std::equal_to<>;
+};
+
+template <typename T>
+struct AbslContainerEq<
+    T, typename std::enable_if_t<HasAbslContainerEq<T>::value>> {
+  using type = typename T::absl_container_eq;
+};
+
+template <typename T, typename E = void>
+struct AbslContainerHash {
+  using type = void;
+};
+
+template <typename T>
+struct AbslContainerHash<
+    T, typename std::enable_if_t<HasAbslContainerHash<T>::value>> {
+  using type = typename T::absl_container_hash;
+};
+
+// HashEq specialization for user types that provide `absl_container_hash` and
+// (optionally) `absl_container_eq`. This specialization lets user types get
+// heterogeneous lookup without having to explicitly specify the Hash/Eq type
+// arguments of Abseil's unordered containers.
+//
+// Both `absl_container_hash` and `absl_container_eq` must be transparent
+// (i.e. have an inner `is_transparent` type). While there is no technical
+// reason to restrict this to transparent types, there is also no feasible use
+// case for a non-transparent one - it is easier to relax the requirement later
+// if such a case arises than to restrict it afterwards.
+//
+// If a type provides only `absl_container_hash`, the `Eq` part defaults to
+// `std::equal_to<void>`.
+//
+// User types may not provide only an `Eq` part, as there is no feasible use
+// case for that - if the default Hash is sufficient, then Eq should be
+// equivalent to `std::equal_to<T>`.
+template <typename T>
+struct HashEq<T, typename std::enable_if_t<HasAbslContainerHash<T>::value>> {
+  using Hash = typename AbslContainerHash<T>::type;
+  using Eq = typename AbslContainerEq<T>::type;
+  static_assert(IsTransparent<Hash>::value,
+                "absl_container_hash must be transparent. To achieve it add a "
+                "`using is_transparent = void;` clause to this type.");
+  static_assert(IsTransparent<Eq>::value,
+                "absl_container_eq must be transparent. To achieve it add a "
+                "`using is_transparent = void;` clause to this type.");
+};
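A minimal sketch of a user type opting in; `UserId` and `Contains` are invented for illustration and rely on the default `Eq` of `std::equal_to<>`:

  #include <cstddef>
  #include <string>

  #include "absl/container/flat_hash_set.h"
  #include "absl/hash/hash.h"
  #include "absl/strings/string_view.h"

  struct UserId {
    std::string name;

    // Transparent hasher: enables flat_hash_set<UserId> without explicit
    // Hash/Eq arguments and allows lookup by absl::string_view.
    struct absl_container_hash {
      using is_transparent = void;
      size_t operator()(const UserId& u) const { return absl::HashOf(u.name); }
      size_t operator()(absl::string_view n) const { return absl::HashOf(n); }
    };

    // Eq defaults to std::equal_to<>, which is transparent and uses these.
    friend bool operator==(const UserId& a, const UserId& b) {
      return a.name == b.name;
    }
    friend bool operator==(const UserId& a, absl::string_view b) {
      return a.name == b;
    }
  };

  bool Contains(const absl::flat_hash_set<UserId>& ids,
                absl::string_view name) {
    return ids.contains(name);  // heterogeneous lookup, no temporary UserId
  }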
+
 // This header's visibility is restricted.  If you need to access the default
 // hasher please use the container's ::hasher alias instead.
 //
diff --git a/absl/container/internal/hash_function_defaults_test.cc b/absl/container/internal/hash_function_defaults_test.cc
index c31af3b..912d119 100644
--- a/absl/container/internal/hash_function_defaults_test.cc
+++ b/absl/container/internal/hash_function_defaults_test.cc
@@ -14,11 +14,15 @@
 
 #include "absl/container/internal/hash_function_defaults.h"
 
+#include <cstddef>
 #include <functional>
 #include <type_traits>
 #include <utility>
 
 #include "gtest/gtest.h"
+#include "absl/container/flat_hash_map.h"
+#include "absl/container/flat_hash_set.h"
+#include "absl/hash/hash.h"
 #include "absl/random/random.h"
 #include "absl/strings/cord.h"
 #include "absl/strings/cord_test_helpers.h"
@@ -476,26 +480,157 @@
   hash_default_hash<typename T::first_type> hash;
 };
 
-TYPED_TEST_SUITE_P(StringLikeTest);
+TYPED_TEST_SUITE(StringLikeTest, StringTypesCartesianProduct);
 
-TYPED_TEST_P(StringLikeTest, Eq) {
+TYPED_TEST(StringLikeTest, Eq) {
   EXPECT_TRUE(this->eq(this->a1, this->b1));
   EXPECT_TRUE(this->eq(this->b1, this->a1));
 }
 
-TYPED_TEST_P(StringLikeTest, NotEq) {
+TYPED_TEST(StringLikeTest, NotEq) {
   EXPECT_FALSE(this->eq(this->a1, this->b2));
   EXPECT_FALSE(this->eq(this->b2, this->a1));
 }
 
-TYPED_TEST_P(StringLikeTest, HashEq) {
+TYPED_TEST(StringLikeTest, HashEq) {
   EXPECT_EQ(this->hash(this->a1), this->hash(this->b1));
   EXPECT_EQ(this->hash(this->a2), this->hash(this->b2));
   // It would be a poor hash function which collides on these strings.
   EXPECT_NE(this->hash(this->a1), this->hash(this->b2));
 }
 
-TYPED_TEST_SUITE(StringLikeTest, StringTypesCartesianProduct);
+struct TypeWithAbslContainerHash {
+  struct absl_container_hash {
+    using is_transparent = void;
+
+    size_t operator()(const TypeWithAbslContainerHash& foo) const {
+      return absl::HashOf(foo.value);
+    }
+
+    // Extra overload to test that heterogeneity works for this hasher.
+    size_t operator()(int value) const { return absl::HashOf(value); }
+  };
+
+  friend bool operator==(const TypeWithAbslContainerHash& lhs,
+                         const TypeWithAbslContainerHash& rhs) {
+    return lhs.value == rhs.value;
+  }
+
+  friend bool operator==(const TypeWithAbslContainerHash& lhs, int rhs) {
+    return lhs.value == rhs;
+  }
+
+  int value;
+  int noise;
+};
+
+struct TypeWithAbslContainerHashAndEq {
+  struct absl_container_hash {
+    using is_transparent = void;
+
+    size_t operator()(const TypeWithAbslContainerHashAndEq& foo) const {
+      return absl::HashOf(foo.value);
+    }
+
+    // Extra overload to test that heterogeneity works for this hasher.
+    size_t operator()(int value) const { return absl::HashOf(value); }
+  };
+
+  struct absl_container_eq {
+    using is_transparent = void;
+
+    bool operator()(const TypeWithAbslContainerHashAndEq& lhs,
+                    const TypeWithAbslContainerHashAndEq& rhs) const {
+      return lhs.value == rhs.value;
+    }
+
+    // Extra overload to test that heterogeneity works for this eq.
+    bool operator()(const TypeWithAbslContainerHashAndEq& lhs, int rhs) const {
+      return lhs.value == rhs;
+    }
+  };
+
+  template <typename T>
+  bool operator==(T&& other) const = delete;
+
+  int value;
+  int noise;
+};
+
+using AbslContainerHashTypes =
+    Types<TypeWithAbslContainerHash, TypeWithAbslContainerHashAndEq>;
+
+template <typename T>
+using AbslContainerHashTest = ::testing::Test;
+
+TYPED_TEST_SUITE(AbslContainerHashTest, AbslContainerHashTypes);
+
+TYPED_TEST(AbslContainerHashTest, HasherWorks) {
+  hash_default_hash<TypeParam> hasher;
+
+  TypeParam foo1{/*value=*/1, /*noise=*/100};
+  TypeParam foo1_copy{/*value=*/1, /*noise=*/20};
+  TypeParam foo2{/*value=*/2, /*noise=*/100};
+
+  EXPECT_EQ(hasher(foo1), absl::HashOf(1));
+  EXPECT_EQ(hasher(foo2), absl::HashOf(2));
+  EXPECT_EQ(hasher(foo1), hasher(foo1_copy));
+
+  // Heterogeneity works.
+  EXPECT_EQ(hasher(foo1), hasher(1));
+  EXPECT_EQ(hasher(foo2), hasher(2));
+}
+
+TYPED_TEST(AbslContainerHashTest, EqWorks) {
+  hash_default_eq<TypeParam> eq;
+
+  TypeParam foo1{/*value=*/1, /*noise=*/100};
+  TypeParam foo1_copy{/*value=*/1, /*noise=*/20};
+  TypeParam foo2{/*value=*/2, /*noise=*/100};
+
+  EXPECT_TRUE(eq(foo1, foo1_copy));
+  EXPECT_FALSE(eq(foo1, foo2));
+
+  // Heterogeneity works.
+  EXPECT_TRUE(eq(foo1, 1));
+  EXPECT_FALSE(eq(foo1, 2));
+}
+
+TYPED_TEST(AbslContainerHashTest, HeterogeneityInMapWorks) {
+  absl::flat_hash_map<TypeParam, int> map;
+
+  TypeParam foo1{/*value=*/1, /*noise=*/100};
+  TypeParam foo1_copy{/*value=*/1, /*noise=*/20};
+  TypeParam foo2{/*value=*/2, /*noise=*/100};
+  TypeParam foo3{/*value=*/3, /*noise=*/100};
+
+  map[foo1] = 1;
+  map[foo2] = 2;
+
+  EXPECT_TRUE(map.contains(foo1_copy));
+  EXPECT_EQ(map.at(foo1_copy), 1);
+  EXPECT_TRUE(map.contains(1));
+  EXPECT_EQ(map.at(1), 1);
+  EXPECT_TRUE(map.contains(2));
+  EXPECT_EQ(map.at(2), 2);
+  EXPECT_FALSE(map.contains(foo3));
+  EXPECT_FALSE(map.contains(3));
+}
+
+TYPED_TEST(AbslContainerHashTest, HeterogeneityInSetWorks) {
+  absl::flat_hash_set<TypeParam> set;
+
+  TypeParam foo1{/*value=*/1, /*noise=*/100};
+  TypeParam foo1_copy{/*value=*/1, /*noise=*/20};
+  TypeParam foo2{/*value=*/2, /*noise=*/100};
+
+  set.insert(foo1);
+
+  EXPECT_TRUE(set.contains(foo1_copy));
+  EXPECT_TRUE(set.contains(1));
+  EXPECT_FALSE(set.contains(foo2));
+  EXPECT_FALSE(set.contains(2));
+}
 
 }  // namespace
 }  // namespace container_internal
@@ -503,7 +638,7 @@
 }  // namespace absl
 
 enum Hash : size_t {
-  kStd = 0x1,       // std::hash
+  kStd = 0x1,  // std::hash
 #ifdef _MSC_VER
   kExtension = kStd,  // In MSVC, std::hash == ::hash
 #else                 // _MSC_VER
diff --git a/absl/container/internal/hash_policy_testing.h b/absl/container/internal/hash_policy_testing.h
index 01c40d2..66bb12e 100644
--- a/absl/container/internal/hash_policy_testing.h
+++ b/absl/container/internal/hash_policy_testing.h
@@ -174,8 +174,7 @@
 // From GCC-4.9 Changelog: (src: https://gcc.gnu.org/gcc-4.9/changes.html)
 // "the unordered associative containers in <unordered_map> and <unordered_set>
 // meet the allocator-aware container requirements;"
-#if (defined(__GLIBCXX__) && __GLIBCXX__ <= 20140425 ) || \
-( __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 9 ))
+#if defined(__GLIBCXX__) && __GLIBCXX__ <= 20140425
 #define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 0
 #else
 #define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 1
diff --git a/absl/container/internal/hash_policy_traits.h b/absl/container/internal/hash_policy_traits.h
index 164ec12..ad835d6 100644
--- a/absl/container/internal/hash_policy_traits.h
+++ b/absl/container/internal/hash_policy_traits.h
@@ -148,6 +148,56 @@
   static auto value(T* elem) -> decltype(P::value(elem)) {
     return P::value(elem);
   }
+
+  using HashSlotFn = size_t (*)(const void* hash_fn, void* slot);
+
+  template <class Hash>
+  static constexpr HashSlotFn get_hash_slot_fn() {
+// get_hash_slot_fn may return nullptr to signal that a non-type-erased
+// function should be used. GCC warns against comparing a function address
+// with nullptr.
+#if defined(__GNUC__) && !defined(__clang__)
+#pragma GCC diagnostic push
+// Silence the error: the address of * will never be NULL [-Werror=address]
+#pragma GCC diagnostic ignored "-Waddress"
+#endif
+    return Policy::template get_hash_slot_fn<Hash>() == nullptr
+               ? &hash_slot_fn_non_type_erased<Hash>
+               : Policy::template get_hash_slot_fn<Hash>();
+#if defined(__GNUC__) && !defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+  }
+
+  // Whether small object optimization is enabled. True by default.
+  static constexpr bool soo_enabled() { return soo_enabled_impl(Rank1{}); }
+
+ private:
+  template <class Hash>
+  struct HashElement {
+    template <class K, class... Args>
+    size_t operator()(const K& key, Args&&...) const {
+      return h(key);
+    }
+    const Hash& h;
+  };
+
+  template <class Hash>
+  static size_t hash_slot_fn_non_type_erased(const void* hash_fn, void* slot) {
+    return Policy::apply(HashElement<Hash>{*static_cast<const Hash*>(hash_fn)},
+                         Policy::element(static_cast<slot_type*>(slot)));
+  }
+
+  // Use go/ranked-overloads for dispatching. Rank1 is preferred.
+  struct Rank0 {};
+  struct Rank1 : Rank0 {};
+
+  // Use auto -> decltype as an enabler.
+  template <class P = Policy>
+  static constexpr auto soo_enabled_impl(Rank1) -> decltype(P::soo_enabled()) {
+    return P::soo_enabled();
+  }
+
+  static constexpr bool soo_enabled_impl(Rank0) { return true; }
 };
 
 }  // namespace container_internal
diff --git a/absl/container/internal/hash_policy_traits_test.cc b/absl/container/internal/hash_policy_traits_test.cc
index 82d7cc3..2d2c7c2 100644
--- a/absl/container/internal/hash_policy_traits_test.cc
+++ b/absl/container/internal/hash_policy_traits_test.cc
@@ -14,12 +14,14 @@
 
 #include "absl/container/internal/hash_policy_traits.h"
 
+#include <cstddef>
 #include <functional>
 #include <memory>
 #include <new>
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
+#include "absl/container/internal/container_memory.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -42,6 +44,11 @@
   static int apply(int v) { return apply_impl(v); }
   static std::function<int(int)> apply_impl;
   static std::function<Slot&(Slot*)> value;
+
+  template <class Hash>
+  static constexpr HashSlotFn get_hash_slot_fn() {
+    return nullptr;
+  }
 };
 
 std::function<int(int)> PolicyWithoutOptionalOps::apply_impl;
@@ -74,6 +81,63 @@
   EXPECT_EQ(&b, &hash_policy_traits<PolicyWithoutOptionalOps>::value(&a));
 }
 
+struct Hash {
+  size_t operator()(Slot a) const { return static_cast<size_t>(a) * 5; }
+};
+
+struct PolicyNoHashFn {
+  using slot_type = Slot;
+  using key_type = Slot;
+  using init_type = Slot;
+
+  static size_t* apply_called_count;
+
+  static Slot& element(Slot* slot) { return *slot; }
+  template <typename Fn>
+  static size_t apply(const Fn& fn, int v) {
+    ++(*apply_called_count);
+    return fn(v);
+  }
+
+  template <class Hash>
+  static constexpr HashSlotFn get_hash_slot_fn() {
+    return nullptr;
+  }
+};
+
+size_t* PolicyNoHashFn::apply_called_count;
+
+struct PolicyCustomHashFn : PolicyNoHashFn {
+  template <class Hash>
+  static constexpr HashSlotFn get_hash_slot_fn() {
+    return &TypeErasedApplyToSlotFn<Hash, int>;
+  }
+};
+
+TEST(HashTest, PolicyNoHashFn_get_hash_slot_fn) {
+  size_t apply_called_count = 0;
+  PolicyNoHashFn::apply_called_count = &apply_called_count;
+
+  Hash hasher;
+  Slot value = 7;
+  auto* fn = hash_policy_traits<PolicyNoHashFn>::get_hash_slot_fn<Hash>();
+  EXPECT_NE(fn, nullptr);
+  EXPECT_EQ(fn(&hasher, &value), hasher(value));
+  EXPECT_EQ(apply_called_count, 1);
+}
+
+TEST(HashTest, PolicyCustomHashFn_get_hash_slot_fn) {
+  size_t apply_called_count = 0;
+  PolicyNoHashFn::apply_called_count = &apply_called_count;
+
+  Hash hasher;
+  Slot value = 7;
+  auto* fn = hash_policy_traits<PolicyCustomHashFn>::get_hash_slot_fn<Hash>();
+  EXPECT_EQ(fn, PolicyCustomHashFn::get_hash_slot_fn<Hash>());
+  EXPECT_EQ(fn(&hasher, &value), hasher(value));
+  EXPECT_EQ(apply_called_count, 0);
+}
+
 }  // namespace
 }  // namespace container_internal
 ABSL_NAMESPACE_END
diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc
index 79a0973..fd21d96 100644
--- a/absl/container/internal/hashtablez_sampler.cc
+++ b/absl/container/internal/hashtablez_sampler.cc
@@ -18,12 +18,18 @@
 #include <atomic>
 #include <cassert>
 #include <cmath>
+#include <cstddef>
+#include <cstdint>
 #include <functional>
 #include <limits>
 
 #include "absl/base/attributes.h"
 #include "absl/base/config.h"
+#include "absl/base/internal/per_thread_tls.h"
 #include "absl/base/internal/raw_logging.h"
+#include "absl/base/macros.h"
+#include "absl/base/no_destructor.h"
+#include "absl/base/optimization.h"
 #include "absl/debugging/stacktrace.h"
 #include "absl/memory/memory.h"
 #include "absl/profiling/internal/exponential_biased.h"
@@ -64,7 +70,7 @@
 #endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
 
 HashtablezSampler& GlobalHashtablezSampler() {
-  static auto* sampler = new HashtablezSampler();
+  static absl::NoDestructor<HashtablezSampler> sampler;
   return *sampler;
 }
 
@@ -72,7 +78,10 @@
 HashtablezInfo::~HashtablezInfo() = default;
 
 void HashtablezInfo::PrepareForSampling(int64_t stride,
-                                        size_t inline_element_size_value) {
+                                        size_t inline_element_size_value,
+                                        size_t key_size_value,
+                                        size_t value_size_value,
+                                        uint16_t soo_capacity_value) {
   capacity.store(0, std::memory_order_relaxed);
   size.store(0, std::memory_order_relaxed);
   num_erases.store(0, std::memory_order_relaxed);
@@ -92,6 +101,9 @@
   depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth,
                               /* skip_count= */ 0);
   inline_element_size = inline_element_size_value;
+  key_size = key_size_value;
+  value_size = value_size_value;
+  soo_capacity = soo_capacity_value;
 }
 
 static bool ShouldForceSampling() {
@@ -115,12 +127,13 @@
 }
 
 HashtablezInfo* SampleSlow(SamplingState& next_sample,
-                           size_t inline_element_size) {
+                           size_t inline_element_size, size_t key_size,
+                           size_t value_size, uint16_t soo_capacity) {
   if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
     next_sample.next_sample = 1;
     const int64_t old_stride = exchange(next_sample.sample_stride, 1);
-    HashtablezInfo* result =
-        GlobalHashtablezSampler().Register(old_stride, inline_element_size);
+    HashtablezInfo* result = GlobalHashtablezSampler().Register(
+        old_stride, inline_element_size, key_size, value_size, soo_capacity);
     return result;
   }
 
@@ -150,10 +163,12 @@
   // that case.
   if (first) {
     if (ABSL_PREDICT_TRUE(--next_sample.next_sample > 0)) return nullptr;
-    return SampleSlow(next_sample, inline_element_size);
+    return SampleSlow(next_sample, inline_element_size, key_size, value_size,
+                      soo_capacity);
   }
 
-  return GlobalHashtablezSampler().Register(old_stride, inline_element_size);
+  return GlobalHashtablezSampler().Register(old_stride, inline_element_size,
+                                            key_size, value_size, soo_capacity);
 #endif
 }
 
diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h
index e41ee2d..d74acf8 100644
--- a/absl/container/internal/hashtablez_sampler.h
+++ b/absl/container/internal/hashtablez_sampler.h
@@ -40,15 +40,20 @@
 #define ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_
 
 #include <atomic>
+#include <cstddef>
+#include <cstdint>
 #include <functional>
 #include <memory>
 #include <vector>
 
+#include "absl/base/attributes.h"
 #include "absl/base/config.h"
 #include "absl/base/internal/per_thread_tls.h"
 #include "absl/base/optimization.h"
+#include "absl/base/thread_annotations.h"
 #include "absl/profiling/internal/sample_recorder.h"
 #include "absl/synchronization/mutex.h"
+#include "absl/time/time.h"
 #include "absl/utility/utility.h"
 
 namespace absl {
@@ -67,7 +72,9 @@
 
   // Puts the object into a clean state, fills in the logically `const` members,
   // blocking for any readers that are currently sampling the object.
-  void PrepareForSampling(int64_t stride, size_t inline_element_size_value)
+  void PrepareForSampling(int64_t stride, size_t inline_element_size_value,
+                          size_t key_size, size_t value_size,
+                          uint16_t soo_capacity_value)
       ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
 
   // These fields are mutated by the various Record* APIs and need to be
@@ -91,8 +98,15 @@
   static constexpr int kMaxStackDepth = 64;
   absl::Time create_time;
   int32_t depth;
+  // The SOO capacity for this table in elements (not bytes). Note that sampled
+  // tables are never SOO because we need to store the infoz handle on the heap.
+  // Tables that would be SOO if not sampled should have: soo_capacity > 0 &&
+  // size <= soo_capacity && max_reserve <= soo_capacity.
+  uint16_t soo_capacity;
   void* stack[kMaxStackDepth];
-  size_t inline_element_size;  // How big is the slot?
+  size_t inline_element_size;  // How big is the slot in bytes?
+  size_t key_size;             // sizeof(key_type)
+  size_t value_size;           // sizeof(value_type)
 };
 
 void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length);
@@ -117,7 +131,8 @@
 };
 
 HashtablezInfo* SampleSlow(SamplingState& next_sample,
-                           size_t inline_element_size);
+                           size_t inline_element_size, size_t key_size,
+                           size_t value_size, uint16_t soo_capacity);
 void UnsampleSlow(HashtablezInfo* info);
 
 #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
@@ -204,16 +219,19 @@
 extern ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample;
 #endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
 
-// Returns an RAII sampling handle that manages registration and unregistation
-// with the global sampler.
+// Returns a sampling handle.
 inline HashtablezInfoHandle Sample(
-    size_t inline_element_size ABSL_ATTRIBUTE_UNUSED) {
+    ABSL_ATTRIBUTE_UNUSED size_t inline_element_size,
+    ABSL_ATTRIBUTE_UNUSED size_t key_size,
+    ABSL_ATTRIBUTE_UNUSED size_t value_size,
+    ABSL_ATTRIBUTE_UNUSED uint16_t soo_capacity) {
 #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
   if (ABSL_PREDICT_TRUE(--global_next_sample.next_sample > 0)) {
     return HashtablezInfoHandle(nullptr);
   }
-  return HashtablezInfoHandle(
-      SampleSlow(global_next_sample, inline_element_size));
+  return HashtablezInfoHandle(SampleSlow(global_next_sample,
+                                         inline_element_size, key_size,
+                                         value_size, soo_capacity));
 #else
   return HashtablezInfoHandle(nullptr);
 #endif  // !ABSL_PER_THREAD_TLS
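A minimal sketch of a call site under the new signature; the wrapper name and the pair-of-int-and-double slot layout are assumptions for illustration only:

  #include <cstdint>
  #include <utility>

  #include "absl/container/internal/hashtablez_sampler.h"

  // Requests a sampling handle for a table whose slots are
  // std::pair<int, double>, assuming an SOO capacity of one element.
  inline absl::container_internal::HashtablezInfoHandle SampleIntDoubleTable() {
    return absl::container_internal::Sample(
        /*inline_element_size=*/sizeof(std::pair<int, double>),
        /*key_size=*/sizeof(int),
        /*value_size=*/sizeof(double),
        /*soo_capacity=*/uint16_t{1});
  }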
diff --git a/absl/container/internal/hashtablez_sampler_test.cc b/absl/container/internal/hashtablez_sampler_test.cc
index 8ebb08d..24d3bc4 100644
--- a/absl/container/internal/hashtablez_sampler_test.cc
+++ b/absl/container/internal/hashtablez_sampler_test.cc
@@ -15,8 +15,12 @@
 #include "absl/container/internal/hashtablez_sampler.h"
 
 #include <atomic>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
 #include <limits>
 #include <random>
+#include <vector>
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
@@ -67,7 +71,11 @@
 HashtablezInfo* Register(HashtablezSampler* s, size_t size) {
   const int64_t test_stride = 123;
   const size_t test_element_size = 17;
-  auto* info = s->Register(test_stride, test_element_size);
+  const size_t test_key_size = 3;
+  const size_t test_value_size = 5;
+  auto* info =
+      s->Register(test_stride, test_element_size, /*key_size=*/test_key_size,
+                  /*value_size=*/test_value_size, /*soo_capacity=*/0);
   assert(info != nullptr);
   info->size.store(size);
   return info;
@@ -77,9 +85,15 @@
   absl::Time test_start = absl::Now();
   const int64_t test_stride = 123;
   const size_t test_element_size = 17;
+  const size_t test_key_size = 15;
+  const size_t test_value_size = 13;
+
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling(test_stride, test_element_size);
+  info.PrepareForSampling(test_stride, test_element_size,
+                          /*key_size=*/test_key_size,
+                          /*value_size=*/test_value_size,
+                          /*soo_capacity_value=*/1);
 
   EXPECT_EQ(info.capacity.load(), 0);
   EXPECT_EQ(info.size.load(), 0);
@@ -94,6 +108,9 @@
   EXPECT_GE(info.create_time, test_start);
   EXPECT_EQ(info.weight, test_stride);
   EXPECT_EQ(info.inline_element_size, test_element_size);
+  EXPECT_EQ(info.key_size, test_key_size);
+  EXPECT_EQ(info.value_size, test_value_size);
+  EXPECT_EQ(info.soo_capacity, 1);
 
   info.capacity.store(1, std::memory_order_relaxed);
   info.size.store(1, std::memory_order_relaxed);
@@ -106,7 +123,10 @@
   info.max_reserve.store(1, std::memory_order_relaxed);
   info.create_time = test_start - absl::Hours(20);
 
-  info.PrepareForSampling(test_stride * 2, test_element_size);
+  info.PrepareForSampling(test_stride * 2, test_element_size,
+                          /*key_size=*/test_key_size,
+                          /*value_size=*/test_value_size,
+                          /*soo_capacity_value=*/0);
   EXPECT_EQ(info.capacity.load(), 0);
   EXPECT_EQ(info.size.load(), 0);
   EXPECT_EQ(info.num_erases.load(), 0);
@@ -119,7 +139,10 @@
   EXPECT_EQ(info.max_reserve.load(), 0);
   EXPECT_EQ(info.weight, 2 * test_stride);
   EXPECT_EQ(info.inline_element_size, test_element_size);
+  EXPECT_EQ(info.key_size, test_key_size);
+  EXPECT_EQ(info.value_size, test_value_size);
   EXPECT_GE(info.create_time, test_start);
+  EXPECT_EQ(info.soo_capacity, 0);
 }
 
 TEST(HashtablezInfoTest, RecordStorageChanged) {
@@ -127,7 +150,13 @@
   absl::MutexLock l(&info.init_mu);
   const int64_t test_stride = 21;
   const size_t test_element_size = 19;
-  info.PrepareForSampling(test_stride, test_element_size);
+  const size_t test_key_size = 17;
+  const size_t test_value_size = 15;
+
+  info.PrepareForSampling(test_stride, test_element_size,
+                          /*key_size=*/test_key_size,
+                          /*value_size=*/test_value_size,
+                          /*soo_capacity_value=*/0);
   RecordStorageChangedSlow(&info, 17, 47);
   EXPECT_EQ(info.size.load(), 17);
   EXPECT_EQ(info.capacity.load(), 47);
@@ -141,7 +170,13 @@
   absl::MutexLock l(&info.init_mu);
   const int64_t test_stride = 25;
   const size_t test_element_size = 23;
-  info.PrepareForSampling(test_stride, test_element_size);
+  const size_t test_key_size = 21;
+  const size_t test_value_size = 19;
+
+  info.PrepareForSampling(test_stride, test_element_size,
+                          /*key_size=*/test_key_size,
+                          /*value_size=*/test_value_size,
+                          /*soo_capacity_value=*/0);
   EXPECT_EQ(info.max_probe_length.load(), 0);
   RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
   EXPECT_EQ(info.max_probe_length.load(), 6);
@@ -163,9 +198,15 @@
 TEST(HashtablezInfoTest, RecordErase) {
   const int64_t test_stride = 31;
   const size_t test_element_size = 29;
+  const size_t test_key_size = 27;
+  const size_t test_value_size = 25;
+
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling(test_stride, test_element_size);
+  info.PrepareForSampling(test_stride, test_element_size,
+                          /*key_size=*/test_key_size,
+                          /*value_size=*/test_value_size,
+                          /*soo_capacity_value=*/1);
   EXPECT_EQ(info.num_erases.load(), 0);
   EXPECT_EQ(info.size.load(), 0);
   RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
@@ -174,14 +215,23 @@
   EXPECT_EQ(info.size.load(), 0);
   EXPECT_EQ(info.num_erases.load(), 1);
   EXPECT_EQ(info.inline_element_size, test_element_size);
+  EXPECT_EQ(info.key_size, test_key_size);
+  EXPECT_EQ(info.value_size, test_value_size);
+  EXPECT_EQ(info.soo_capacity, 1);
 }
 
 TEST(HashtablezInfoTest, RecordRehash) {
   const int64_t test_stride = 33;
   const size_t test_element_size = 31;
+  const size_t test_key_size = 29;
+  const size_t test_value_size = 27;
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling(test_stride, test_element_size);
+  info.PrepareForSampling(test_stride, test_element_size,
+                          /*key_size=*/test_key_size,
+                          /*value_size=*/test_value_size,
+                          /*soo_capacity_value=*/0);
   RecordInsertSlow(&info, 0x1, 0);
   RecordInsertSlow(&info, 0x2, kProbeLength);
   RecordInsertSlow(&info, 0x4, kProbeLength);
@@ -201,6 +251,9 @@
   EXPECT_EQ(info.num_erases.load(), 0);
   EXPECT_EQ(info.num_rehashes.load(), 1);
   EXPECT_EQ(info.inline_element_size, test_element_size);
+  EXPECT_EQ(info.key_size, test_key_size);
+  EXPECT_EQ(info.value_size, test_value_size);
+  EXPECT_EQ(info.soo_capacity, 0);
 }
 
 TEST(HashtablezInfoTest, RecordReservation) {
@@ -208,7 +261,14 @@
   absl::MutexLock l(&info.init_mu);
   const int64_t test_stride = 35;
   const size_t test_element_size = 33;
-  info.PrepareForSampling(test_stride, test_element_size);
+  const size_t test_key_size = 31;
+  const size_t test_value_size = 29;
+
+  info.PrepareForSampling(test_stride, test_element_size,
+                          /*key_size=*/test_key_size,
+                          /*value_size=*/test_value_size,
+                          /*soo_capacity_value=*/0);
   RecordReservationSlow(&info, 3);
   EXPECT_EQ(info.max_reserve.load(), 3);
 
@@ -224,12 +284,19 @@
 #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
 TEST(HashtablezSamplerTest, SmallSampleParameter) {
   const size_t test_element_size = 31;
+  const size_t test_key_size = 33;
+  const size_t test_value_size = 35;
+
   SetHashtablezEnabled(true);
   SetHashtablezSampleParameter(100);
 
   for (int i = 0; i < 1000; ++i) {
     SamplingState next_sample = {0, 0};
-    HashtablezInfo* sample = SampleSlow(next_sample, test_element_size);
+    HashtablezInfo* sample =
+        SampleSlow(next_sample, test_element_size,
+                   /*key_size=*/test_key_size, /*value_size=*/test_value_size,
+                   /*soo_capacity=*/0);
     EXPECT_GT(next_sample.next_sample, 0);
     EXPECT_EQ(next_sample.next_sample, next_sample.sample_stride);
     EXPECT_NE(sample, nullptr);
@@ -239,12 +306,17 @@
 
 TEST(HashtablezSamplerTest, LargeSampleParameter) {
   const size_t test_element_size = 31;
+  const size_t test_key_size = 33;
+  const size_t test_value_size = 35;
   SetHashtablezEnabled(true);
   SetHashtablezSampleParameter(std::numeric_limits<int32_t>::max());
 
   for (int i = 0; i < 1000; ++i) {
     SamplingState next_sample = {0, 0};
-    HashtablezInfo* sample = SampleSlow(next_sample, test_element_size);
+    HashtablezInfo* sample =
+        SampleSlow(next_sample, test_element_size,
+                   /*key_size=*/test_key_size, /*value_size=*/test_value_size,
+                   /*soo_capacity=*/0);
     EXPECT_GT(next_sample.next_sample, 0);
     EXPECT_EQ(next_sample.next_sample, next_sample.sample_stride);
     EXPECT_NE(sample, nullptr);
@@ -254,13 +326,20 @@
 
 TEST(HashtablezSamplerTest, Sample) {
   const size_t test_element_size = 31;
+  const size_t test_key_size = 33;
+  const size_t test_value_size = 35;
   SetHashtablezEnabled(true);
   SetHashtablezSampleParameter(100);
   int64_t num_sampled = 0;
   int64_t total = 0;
   double sample_rate = 0.0;
   for (int i = 0; i < 1000000; ++i) {
-    HashtablezInfoHandle h = Sample(test_element_size);
+    HashtablezInfoHandle h =
+        Sample(test_element_size,
+               /*key_size=*/test_key_size, /*value_size=*/test_value_size,
+               /*soo_capacity=*/0);
+
     ++total;
     if (h.IsSampled()) {
       ++num_sampled;
@@ -275,7 +354,12 @@
   auto& sampler = GlobalHashtablezSampler();
   const int64_t test_stride = 41;
   const size_t test_element_size = 39;
-  HashtablezInfoHandle h(sampler.Register(test_stride, test_element_size));
+  const size_t test_key_size = 37;
+  const size_t test_value_size = 35;
+  HashtablezInfoHandle h(sampler.Register(test_stride, test_element_size,
+                                          /*key_size=*/test_key_size,
+                                          /*value_size=*/test_value_size,
+                                          /*soo_capacity=*/0));
   auto* info = HashtablezInfoHandlePeer::GetInfo(&h);
   info->hashes_bitwise_and.store(0x12345678, std::memory_order_relaxed);
 
@@ -351,18 +435,28 @@
   for (int i = 0; i < 10; ++i) {
     const int64_t sampling_stride = 11 + i % 3;
     const size_t elt_size = 10 + i % 2;
-    pool.Schedule([&sampler, &stop, sampling_stride, elt_size]() {
+    const size_t key_size = 12 + i % 4;
+    const size_t value_size = 13 + i % 5;
+    pool.Schedule([&sampler, &stop, sampling_stride, elt_size, key_size,
+                   value_size]() {
       std::random_device rd;
       std::mt19937 gen(rd());
 
       std::vector<HashtablezInfo*> infoz;
       while (!stop.HasBeenNotified()) {
         if (infoz.empty()) {
-          infoz.push_back(sampler.Register(sampling_stride, elt_size));
+          infoz.push_back(sampler.Register(sampling_stride, elt_size,
+                                           /*key_size=*/key_size,
+                                           /*value_size=*/value_size,
+                                           /*soo_capacity=*/0));
         }
         switch (std::uniform_int_distribution<>(0, 2)(gen)) {
           case 0: {
-            infoz.push_back(sampler.Register(sampling_stride, elt_size));
+            infoz.push_back(sampler.Register(sampling_stride, elt_size,
+                                             /*key_size=*/key_size,
+                                             /*value_size=*/value_size,
+                                             /*soo_capacity=*/0));
             break;
           }
           case 1: {
diff --git a/absl/container/internal/inlined_vector.h b/absl/container/internal/inlined_vector.h
index 0eb9c34..2f24e46 100644
--- a/absl/container/internal/inlined_vector.h
+++ b/absl/container/internal/inlined_vector.h
@@ -27,6 +27,7 @@
 
 #include "absl/base/attributes.h"
 #include "absl/base/config.h"
+#include "absl/base/internal/identity.h"
 #include "absl/base/macros.h"
 #include "absl/container/internal/compressed_tuple.h"
 #include "absl/memory/memory.h"
@@ -82,16 +83,6 @@
 template <typename A>
 using IsSwapOk = absl::type_traits_internal::IsSwappable<ValueType<A>>;
 
-template <typename T>
-struct TypeIdentity {
-  using type = T;
-};
-
-// Used for function arguments in template functions to prevent ADL by forcing
-// callers to explicitly specify the template parameter.
-template <typename T>
-using NoTypeDeduction = typename TypeIdentity<T>::type;
-
 template <typename A, bool IsTriviallyDestructible =
                           absl::is_trivially_destructible<ValueType<A>>::value>
 struct DestroyAdapter;
@@ -139,7 +130,7 @@
 };
 
 template <typename A, typename ValueAdapter>
-void ConstructElements(NoTypeDeduction<A>& allocator,
+void ConstructElements(absl::internal::type_identity_t<A>& allocator,
                        Pointer<A> construct_first, ValueAdapter& values,
                        SizeType<A> construct_size) {
   for (SizeType<A> i = 0; i < construct_size; ++i) {
@@ -322,14 +313,13 @@
 
   // The policy to be used specifically when swapping inlined elements.
   using SwapInlinedElementsPolicy = absl::conditional_t<
-      // Fast path: if the value type can be trivially move constructed/assigned
-      // and destroyed, and we know the allocator doesn't do anything fancy,
-      // then it's safe for us to simply swap the bytes in the inline storage.
-      // It's as if we had move-constructed a temporary vector, move-assigned
-      // one to the other, then move-assigned the first from the temporary.
-      absl::conjunction<absl::is_trivially_move_constructible<ValueType<A>>,
-                        absl::is_trivially_move_assignable<ValueType<A>>,
-                        absl::is_trivially_destructible<ValueType<A>>,
+      // Fast path: if the value type can be trivially relocated, and we
+      // know the allocator doesn't do anything fancy, then it's safe for us
+      // to simply swap the bytes in the inline storage. It's as if we had
+      // relocated the first vector's elements into temporary storage,
+      // relocated the second's elements into the (now-empty) first's,
+      // and then relocated from temporary storage into the second.
+      absl::conjunction<absl::is_trivially_relocatable<ValueType<A>>,
                         std::is_same<A, std::allocator<ValueType<A>>>>::value,
       MemcpyPolicy,
       absl::conditional_t<IsSwapOk<A>::value, ElementwiseSwapPolicy,
@@ -624,8 +614,8 @@
 
 template <typename T, size_t N, typename A>
 template <typename ValueAdapter>
-auto Storage<T, N, A>::Initialize(ValueAdapter values, SizeType<A> new_size)
-    -> void {
+auto Storage<T, N, A>::Initialize(ValueAdapter values,
+                                  SizeType<A> new_size) -> void {
   // Only callable from constructors!
   ABSL_HARDENING_ASSERT(!GetIsAllocated());
   ABSL_HARDENING_ASSERT(GetSize() == 0);
@@ -656,8 +646,8 @@
 
 template <typename T, size_t N, typename A>
 template <typename ValueAdapter>
-auto Storage<T, N, A>::Assign(ValueAdapter values, SizeType<A> new_size)
-    -> void {
+auto Storage<T, N, A>::Assign(ValueAdapter values,
+                              SizeType<A> new_size) -> void {
   StorageView<A> storage_view = MakeStorageView();
 
   AllocationTransaction<A> allocation_tx(GetAllocator());
@@ -699,8 +689,8 @@
 
 template <typename T, size_t N, typename A>
 template <typename ValueAdapter>
-auto Storage<T, N, A>::Resize(ValueAdapter values, SizeType<A> new_size)
-    -> void {
+auto Storage<T, N, A>::Resize(ValueAdapter values,
+                              SizeType<A> new_size) -> void {
   StorageView<A> storage_view = MakeStorageView();
   Pointer<A> const base = storage_view.data;
   const SizeType<A> size = storage_view.size;
@@ -885,8 +875,8 @@
 }
 
 template <typename T, size_t N, typename A>
-auto Storage<T, N, A>::Erase(ConstIterator<A> from, ConstIterator<A> to)
-    -> Iterator<A> {
+auto Storage<T, N, A>::Erase(ConstIterator<A> from,
+                             ConstIterator<A> to) -> Iterator<A> {
   StorageView<A> storage_view = MakeStorageView();
 
   auto erase_size = static_cast<SizeType<A>>(std::distance(from, to));
@@ -894,16 +884,30 @@
       std::distance(ConstIterator<A>(storage_view.data), from));
   SizeType<A> erase_end_index = erase_index + erase_size;
 
-  IteratorValueAdapter<A, MoveIterator<A>> move_values(
-      MoveIterator<A>(storage_view.data + erase_end_index));
+  // Fast path: if the value type is trivially relocatable and we know
+  // the allocator doesn't do anything fancy, then we know it is legal for us to
+  // simply destroy the elements in the "erasure window" (which cannot throw)
+  // and then memcpy downward to close the window.
+  if (absl::is_trivially_relocatable<ValueType<A>>::value &&
+      std::is_nothrow_destructible<ValueType<A>>::value &&
+      std::is_same<A, std::allocator<ValueType<A>>>::value) {
+    DestroyAdapter<A>::DestroyElements(
+        GetAllocator(), storage_view.data + erase_index, erase_size);
+    std::memmove(
+        reinterpret_cast<char*>(storage_view.data + erase_index),
+        reinterpret_cast<const char*>(storage_view.data + erase_end_index),
+        (storage_view.size - erase_end_index) * sizeof(ValueType<A>));
+  } else {
+    IteratorValueAdapter<A, MoveIterator<A>> move_values(
+        MoveIterator<A>(storage_view.data + erase_end_index));
 
-  AssignElements<A>(storage_view.data + erase_index, move_values,
-                    storage_view.size - erase_end_index);
+    AssignElements<A>(storage_view.data + erase_index, move_values,
+                      storage_view.size - erase_end_index);
 
-  DestroyAdapter<A>::DestroyElements(
-      GetAllocator(), storage_view.data + (storage_view.size - erase_size),
-      erase_size);
-
+    DestroyAdapter<A>::DestroyElements(
+        GetAllocator(), storage_view.data + (storage_view.size - erase_size),
+        erase_size);
+  }
   SubtractSize(erase_size);
   return Iterator<A>(storage_view.data + erase_index);
 }
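
Note: the erase fast path above depends on trivial relocatability: destroying the erased window and then byte-copying the tail downward is equivalent to move-assigning elements into the window and destroying the tail, but without running any user code for the moves. A standalone sketch of that idea follows; it does not reference the Storage<> internals, and the free function and its contract are assumptions for illustration only.

    // Sketch of the relocation-based erase idea (not the Storage<> member).
    // After the call, the last `count` slots are vacated raw storage; the
    // caller must shrink its logical size and must not destroy them again.
    #include <cstddef>
    #include <cstring>
    #include <type_traits>

    #include "absl/meta/type_traits.h"

    template <class T>
    void EraseByRelocation(T* data, size_t size, size_t first, size_t count) {
      static_assert(absl::is_trivially_relocatable<T>::value &&
                        std::is_nothrow_destructible<T>::value,
                    "fast path: trivially relocatable, non-throwing destructor");
      // Destroy the erased window, then slide the tail down as raw bytes.
      for (size_t i = 0; i < count; ++i) data[first + i].~T();
      std::memmove(static_cast<void*>(data + first), data + first + count,
                   (size - first - count) * sizeof(T));
    }
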
diff --git a/absl/container/internal/layout.h b/absl/container/internal/layout.h
index a4ba610..384929a 100644
--- a/absl/container/internal/layout.h
+++ b/absl/container/internal/layout.h
@@ -81,9 +81,30 @@
 //   }
 //
 // The layout we used above combines fixed-size with dynamically-sized fields.
-// This is quite common. Layout is optimized for this use case and generates
-// optimal code. All computations that can be performed at compile time are
-// indeed performed at compile time.
+// This is quite common. Layout is optimized for this use case and attempts to
+// generate optimal code. To help the compiler do that in more cases, you can
+// specify the fixed sizes using `WithStaticSizes`. This ensures that all
+// computations that can be performed at compile time are indeed performed at
+// compile time. Note that sometimes the `template` keyword is needed. E.g.:
+//
+//   using SL = L::template WithStaticSizes<1, 1>;
+//
+//   void Use(unsigned char* p) {
+//     // First, extract N and M.
+//     // Using `prefix` we can access the first three arrays but not more.
+//     //
+//     // More details: The first array always has offset 0. `SL` computes the
+//     // offsets of the second and third arrays from the sizes of the first
+//     // and second arrays, which were specified via `WithStaticSizes`.
+//     constexpr auto prefix = SL::Partial();
+//     size_t n = *prefix.Pointer<0>(p);
+//     size_t m = *prefix.Pointer<1>(p);
+//
+//     // Now we can get a pointer to the final payload.
+//     const SL layout(n, m);
+//     double* a = layout.Pointer<double>(p);
+//     int* b = layout.Pointer<int>(p);
+//   }
 //
 // Efficiency tip: The order of fields matters. In `Layout<T1, ..., TN>` try to
 // ensure that `alignof(T1) >= ... >= alignof(TN)`. This way you'll have no
@@ -107,7 +128,7 @@
 //     CompactString(const char* s = "") {
 //       const size_t size = strlen(s);
 //       // size_t[1] followed by char[size + 1].
-//       const L layout(1, size + 1);
+//       const L layout(size + 1);
 //       p_.reset(new unsigned char[layout.AllocSize()]);
 //       // If running under ASAN, mark the padding bytes, if any, to catch
 //       // memory errors.
@@ -125,14 +146,13 @@
 //
 //     const char* c_str() const {
 //       // Equivalent to reinterpret_cast<char*>(p.get() + sizeof(size_t)).
-//       // The argument in Partial(1) specifies that we have size_t[1] in front
-//       // of the characters.
-//       return L::Partial(1).Pointer<char>(p_.get());
+//       return L::Partial().Pointer<char>(p_.get());
 //     }
 //
 //    private:
-//     // Our heap allocation contains a size_t followed by an array of chars.
-//     using L = Layout<size_t, char>;
+//     // Our heap allocation contains a single size_t followed by an array of
+//     // chars.
+//     using L = Layout<size_t, char>::WithStaticSizes<1>;
 //     std::unique_ptr<unsigned char[]> p_;
 //   };
 //
@@ -146,11 +166,12 @@
 //
 // The interface exported by this file consists of:
 // - class `Layout<>` and its public members.
-// - The public members of class `internal_layout::LayoutImpl<>`. That class
-//   isn't intended to be used directly, and its name and template parameter
-//   list are internal implementation details, but the class itself provides
-//   most of the functionality in this file. See comments on its members for
-//   detailed documentation.
+// - The public members of classes `internal_layout::LayoutWithStaticSizes<>`
+//   and `internal_layout::LayoutImpl<>`. Those classes aren't intended to be
+//   used directly, and their name and template parameter list are internal
+//   implementation details, but the classes themselves provide most of the
+//   functionality in this file. See comments on their members for detailed
+//   documentation.
 //
 // `Layout<T1,... Tn>::Partial(count1,..., countm)` (where `m` <= `n`) returns a
 // `LayoutImpl<>` object. `Layout<T1,..., Tn> layout(count1,..., countn)`
@@ -164,13 +185,14 @@
 #include <stddef.h>
 #include <stdint.h>
 
-#include <ostream>
+#include <array>
 #include <string>
 #include <tuple>
 #include <type_traits>
 #include <typeinfo>
 #include <utility>
 
+#include "absl/base/attributes.h"
 #include "absl/base/config.h"
 #include "absl/debugging/internal/demangle.h"
 #include "absl/meta/type_traits.h"
@@ -209,9 +231,6 @@
 template <size_t>
 using IntToSize = size_t;
 
-template <class>
-using TypeToSize = size_t;
-
 template <class T>
 struct Type : NotAligned<T> {
   using type = T;
@@ -308,7 +327,8 @@
               !std::is_volatile<typename Type<T>::type>::value &&
               adl_barrier::IsPow2(AlignOf<T>::value)>;
 
-template <class Elements, class SizeSeq, class OffsetSeq>
+template <class Elements, class StaticSizeSeq, class RuntimeSizeSeq,
+          class SizeSeq, class OffsetSeq>
 class LayoutImpl;
 
 // Public base class of `Layout` and the result type of `Layout::Partial()`.
@@ -316,31 +336,49 @@
 // `Elements...` contains all template arguments of `Layout` that created this
 // instance.
 //
-// `SizeSeq...` is `[0, NumSizes)` where `NumSizes` is the number of arguments
-// passed to `Layout::Partial()` or `Layout::Layout()`.
+// `StaticSizeSeq...` is an index_sequence containing the sizes specified at
+// compile-time.
+//
+// `RuntimeSizeSeq...` is `[0, NumRuntimeSizes)`, where `NumRuntimeSizes` is the
+// number of arguments passed to `Layout::Partial()` or `Layout::Layout()`.
+//
+// `SizeSeq...` is `[0, NumSizes)` where `NumSizes` is `NumRuntimeSizes` plus
+// the number of sizes in `StaticSizeSeq`.
 //
 // `OffsetSeq...` is `[0, NumOffsets)` where `NumOffsets` is
 // `Min(sizeof...(Elements), NumSizes + 1)` (the number of arrays for which we
 // can compute offsets).
-template <class... Elements, size_t... SizeSeq, size_t... OffsetSeq>
-class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
-                 absl::index_sequence<OffsetSeq...>> {
+template <class... Elements, size_t... StaticSizeSeq, size_t... RuntimeSizeSeq,
+          size_t... SizeSeq, size_t... OffsetSeq>
+class LayoutImpl<
+    std::tuple<Elements...>, absl::index_sequence<StaticSizeSeq...>,
+    absl::index_sequence<RuntimeSizeSeq...>, absl::index_sequence<SizeSeq...>,
+    absl::index_sequence<OffsetSeq...>> {
  private:
   static_assert(sizeof...(Elements) > 0, "At least one field is required");
   static_assert(absl::conjunction<IsLegalElementType<Elements>...>::value,
                 "Invalid element type (see IsLegalElementType)");
+  static_assert(sizeof...(StaticSizeSeq) <= sizeof...(Elements),
+                "Too many static sizes specified");
 
   enum {
     NumTypes = sizeof...(Elements),
+    NumStaticSizes = sizeof...(StaticSizeSeq),
+    NumRuntimeSizes = sizeof...(RuntimeSizeSeq),
     NumSizes = sizeof...(SizeSeq),
     NumOffsets = sizeof...(OffsetSeq),
   };
 
   // These are guaranteed by `Layout`.
+  static_assert(NumStaticSizes + NumRuntimeSizes == NumSizes, "Internal error");
+  static_assert(NumSizes <= NumTypes, "Internal error");
   static_assert(NumOffsets == adl_barrier::Min(NumTypes, NumSizes + 1),
                 "Internal error");
   static_assert(NumTypes > 0, "Internal error");
 
+  static constexpr std::array<size_t, sizeof...(StaticSizeSeq)> kStaticSizes = {
+      StaticSizeSeq...};
+
   // Returns the index of `T` in `Elements...`. Results in a compilation error
   // if `Elements...` doesn't contain exactly one instance of `T`.
   template <class T>
@@ -363,7 +401,7 @@
   template <size_t N>
   using ElementType = typename std::tuple_element<N, ElementTypes>::type;
 
-  constexpr explicit LayoutImpl(IntToSize<SizeSeq>... sizes)
+  constexpr explicit LayoutImpl(IntToSize<RuntimeSizeSeq>... sizes)
       : size_{sizes...} {}
 
   // Alignment of the layout, equal to the strictest alignment of all elements.
@@ -389,7 +427,7 @@
   constexpr size_t Offset() const {
     static_assert(N < NumOffsets, "Index out of bounds");
     return adl_barrier::Align(
-        Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * size_[N - 1],
+        Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * Size<N - 1>(),
         ElementAlignment<N>::value);
   }
 
@@ -411,8 +449,7 @@
     return {{Offset<OffsetSeq>()...}};
   }
 
-  // The number of elements in the Nth array. This is the Nth argument of
-  // `Layout::Partial()` or `Layout::Layout()` (zero-based).
+  // The number of elements in the Nth array (zero-based).
   //
   //   // int[3], 4 bytes of padding, double[4].
   //   Layout<int, double> x(3, 4);
@@ -420,10 +457,15 @@
   //   assert(x.Size<1>() == 4);
   //
   // Requires: `N < NumSizes`.
-  template <size_t N>
+  template <size_t N, EnableIf<(N < NumStaticSizes)> = 0>
+  constexpr size_t Size() const {
+    return kStaticSizes[N];
+  }
+
+  template <size_t N, EnableIf<(N >= NumStaticSizes)> = 0>
   constexpr size_t Size() const {
     static_assert(N < NumSizes, "Index out of bounds");
-    return size_[N];
+    return size_[N - NumStaticSizes];
   }
 
   // The number of elements in the array with the specified element type.
@@ -500,13 +542,8 @@
   //   std::tie(ints, doubles) = x.Pointers(p);
   //
   // Requires: `p` is aligned to `Alignment()`.
-  //
-  // Note: We're not using ElementType alias here because it does not compile
-  // under MSVC.
   template <class Char>
-  std::tuple<CopyConst<
-      Char, typename std::tuple_element<OffsetSeq, ElementTypes>::type>*...>
-  Pointers(Char* p) const {
+  auto Pointers(Char* p) const {
     return std::tuple<CopyConst<Char, ElementType<OffsetSeq>>*...>(
         Pointer<OffsetSeq>(p)...);
   }
@@ -559,15 +596,10 @@
   //
   // Requires: `p` is aligned to `Alignment()`.
   //
-  // Note: We're not using ElementType alias here because it does not compile
-  // under MSVC.
+  // Note: We mark the parameter as unused because GCC detects it is not used
+  // when `SizeSeq` is empty [-Werror=unused-but-set-parameter].
   template <class Char>
-  std::tuple<SliceType<CopyConst<
-      Char, typename std::tuple_element<SizeSeq, ElementTypes>::type>>...>
-  Slices(Char* p) const {
-    // Workaround for https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63875 (fixed
-    // in 6.1).
-    (void)p;
+  auto Slices(ABSL_ATTRIBUTE_UNUSED Char* p) const {
     return std::tuple<SliceType<CopyConst<Char, ElementType<SizeSeq>>>...>(
         Slice<SizeSeq>(p)...);
   }
@@ -582,7 +614,7 @@
   constexpr size_t AllocSize() const {
     static_assert(NumTypes == NumSizes, "You must specify sizes of all fields");
     return Offset<NumTypes - 1>() +
-        SizeOf<ElementType<NumTypes - 1>>::value * size_[NumTypes - 1];
+           SizeOf<ElementType<NumTypes - 1>>::value * Size<NumTypes - 1>();
   }
 
   // If built with --config=asan, poisons padding bytes (if any) in the
@@ -606,7 +638,7 @@
     // The `if` is an optimization. It doesn't affect the observable behaviour.
     if (ElementAlignment<N - 1>::value % ElementAlignment<N>::value) {
       size_t start =
-          Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * size_[N - 1];
+          Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * Size<N - 1>();
       ASAN_POISON_MEMORY_REGION(p + start, Offset<N>() - start);
     }
 #endif
@@ -635,47 +667,66 @@
         adl_barrier::TypeName<ElementType<OffsetSeq>>()...};
     std::string res = absl::StrCat("@0", types[0], "(", sizes[0], ")");
     for (size_t i = 0; i != NumOffsets - 1; ++i) {
-      absl::StrAppend(&res, "[", size_[i], "]; @", offsets[i + 1], types[i + 1],
-                      "(", sizes[i + 1], ")");
+      absl::StrAppend(&res, "[", DebugSize(i), "]; @", offsets[i + 1],
+                      types[i + 1], "(", sizes[i + 1], ")");
     }
     // NumSizes is a constant that may be zero. Some compilers cannot see that
     // inside the if statement "size_[NumSizes - 1]" must be valid.
     int last = static_cast<int>(NumSizes) - 1;
     if (NumTypes == NumSizes && last >= 0) {
-      absl::StrAppend(&res, "[", size_[last], "]");
+      absl::StrAppend(&res, "[", DebugSize(static_cast<size_t>(last)), "]");
     }
     return res;
   }
 
  private:
+  size_t DebugSize(size_t n) const {
+    if (n < NumStaticSizes) {
+      return kStaticSizes[n];
+    } else {
+      return size_[n - NumStaticSizes];
+    }
+  }
+
   // Arguments of `Layout::Partial()` or `Layout::Layout()`.
-  size_t size_[NumSizes > 0 ? NumSizes : 1];
+  size_t size_[NumRuntimeSizes > 0 ? NumRuntimeSizes : 1];
 };
 
-template <size_t NumSizes, class... Ts>
+// An out-of-class definition of a constexpr static data member is redundant
+// (and deprecated) in C++17, but still required in C++14.
+template <class... Elements, size_t... StaticSizeSeq, size_t... RuntimeSizeSeq,
+          size_t... SizeSeq, size_t... OffsetSeq>
+constexpr std::array<size_t, sizeof...(StaticSizeSeq)> LayoutImpl<
+    std::tuple<Elements...>, absl::index_sequence<StaticSizeSeq...>,
+    absl::index_sequence<RuntimeSizeSeq...>, absl::index_sequence<SizeSeq...>,
+    absl::index_sequence<OffsetSeq...>>::kStaticSizes;
+
+template <class StaticSizeSeq, size_t NumRuntimeSizes, class... Ts>
 using LayoutType = LayoutImpl<
-    std::tuple<Ts...>, absl::make_index_sequence<NumSizes>,
-    absl::make_index_sequence<adl_barrier::Min(sizeof...(Ts), NumSizes + 1)>>;
+    std::tuple<Ts...>, StaticSizeSeq,
+    absl::make_index_sequence<NumRuntimeSizes>,
+    absl::make_index_sequence<NumRuntimeSizes + StaticSizeSeq::size()>,
+    absl::make_index_sequence<adl_barrier::Min(
+        sizeof...(Ts), NumRuntimeSizes + StaticSizeSeq::size() + 1)>>;
 
-}  // namespace internal_layout
+template <class StaticSizeSeq, class... Ts>
+class LayoutWithStaticSizes
+    : public LayoutType<StaticSizeSeq,
+                        sizeof...(Ts) - adl_barrier::Min(sizeof...(Ts),
+                                                         StaticSizeSeq::size()),
+                        Ts...> {
+ private:
+  using Super =
+      LayoutType<StaticSizeSeq,
+                 sizeof...(Ts) -
+                     adl_barrier::Min(sizeof...(Ts), StaticSizeSeq::size()),
+                 Ts...>;
 
-// Descriptor of arrays of various types and sizes laid out in memory one after
-// another. See the top of the file for documentation.
-//
-// Check out the public API of internal_layout::LayoutImpl above. The type is
-// internal to the library but its methods are public, and they are inherited
-// by `Layout`.
-template <class... Ts>
-class Layout : public internal_layout::LayoutType<sizeof...(Ts), Ts...> {
  public:
-  static_assert(sizeof...(Ts) > 0, "At least one field is required");
-  static_assert(
-      absl::conjunction<internal_layout::IsLegalElementType<Ts>...>::value,
-      "Invalid element type (see IsLegalElementType)");
-
   // The result type of `Partial()` with `NumSizes` arguments.
   template <size_t NumSizes>
-  using PartialType = internal_layout::LayoutType<NumSizes, Ts...>;
+  using PartialType =
+      internal_layout::LayoutType<StaticSizeSeq, NumSizes, Ts...>;
 
   // `Layout` knows the element types of the arrays we want to lay out in
   // memory but not the number of elements in each array.
@@ -701,14 +752,18 @@
   // Note: The sizes of the arrays must be specified in number of elements,
   // not in bytes.
   //
-  // Requires: `sizeof...(Sizes) <= sizeof...(Ts)`.
+  // Requires: `sizeof...(Sizes) + NumStaticSizes <= sizeof...(Ts)`.
   // Requires: all arguments are convertible to `size_t`.
   template <class... Sizes>
   static constexpr PartialType<sizeof...(Sizes)> Partial(Sizes&&... sizes) {
-    static_assert(sizeof...(Sizes) <= sizeof...(Ts), "");
-    return PartialType<sizeof...(Sizes)>(absl::forward<Sizes>(sizes)...);
+    static_assert(sizeof...(Sizes) + StaticSizeSeq::size() <= sizeof...(Ts),
+                  "");
+    return PartialType<sizeof...(Sizes)>(
+        static_cast<size_t>(std::forward<Sizes>(sizes))...);
   }
 
+  // Inherit LayoutType's constructor.
+  //
   // Creates a layout with the sizes of all arrays specified. If you know
   // only the sizes of the first N arrays (where N can be zero), you can use
   // `Partial()` defined above. The constructor is essentially equivalent to
@@ -717,8 +772,69 @@
   //
   // Note: The sizes of the arrays must be specified in number of elements,
   // not in bytes.
-  constexpr explicit Layout(internal_layout::TypeToSize<Ts>... sizes)
-      : internal_layout::LayoutType<sizeof...(Ts), Ts...>(sizes...) {}
+  //
+  // Implementation note: we do this via a `using` declaration instead of
+  // defining our own explicit constructor because the signature of LayoutType's
+  // constructor depends on RuntimeSizeSeq, which we don't have access to here.
+  // If we defined our own constructor here, it would have to use a parameter
+  // pack and then cast the arguments to size_t when calling the superclass
+  // constructor, similar to what Partial() does. But that would suffer from the
+  // same problem that Partial() has, which is that the parameter types are
+  // inferred from the arguments, which may be signed types, which must then be
+  // cast to size_t. This can lead to negative values being silently (i.e. with
+  // no compiler warnings) cast to an unsigned type. Having a constructor with
+  // size_t parameters helps the compiler generate better warnings about
+  // potential bad casts, while avoiding false warnings when positive literal
+  // arguments are used. If an argument is a positive literal integer (e.g.
+  // `1`), the compiler will understand that it can be safely converted to
+  // size_t, and hence not generate a warning. But if a negative literal (e.g.
+  // `-1`) or a variable with signed type is used, then it can generate a
+  // warning about a potentially unsafe implicit cast. It would be great if we
+  // could do this for Partial() too, but unfortunately as of C++23 there seems
+  // to be no way to define a function with a variable number of parameters of a
+  // certain type, a.k.a. homogeneous function parameter packs. So we're forced
+  // to choose between explicitly casting the arguments to size_t, which
+  // suppresses all warnings, even potentially valid ones, or implicitly casting
+  // them to size_t, which generates bogus warnings whenever literal arguments
+  // are used, even if they're positive.
+  using Super::Super;
+};
+
+}  // namespace internal_layout
+
+// Descriptor of arrays of various types and sizes laid out in memory one after
+// another. See the top of the file for documentation.
+//
+// Check out the public API of internal_layout::LayoutWithStaticSizes and
+// internal_layout::LayoutImpl above. Those types are internal to the library
+// but their methods are public, and they are inherited by `Layout`.
+template <class... Ts>
+class Layout : public internal_layout::LayoutWithStaticSizes<
+                   absl::make_index_sequence<0>, Ts...> {
+ private:
+  using Super =
+      internal_layout::LayoutWithStaticSizes<absl::make_index_sequence<0>,
+                                             Ts...>;
+
+ public:
+  // If you know the sizes of some or all of the arrays at compile time, you can
+  // use `WithStaticSizes` or `WithStaticSizeSequence` to create a `Layout` type
+  // with those sizes baked in. This can help the compiler generate optimal code
+  // for calculating array offsets and AllocSize().
+  //
+  // Like `Partial()`, the N sizes you specify are for the first N arrays, and
+  // they specify the number of elements in each array, not the number of bytes.
+  template <class StaticSizeSeq>
+  using WithStaticSizeSequence =
+      internal_layout::LayoutWithStaticSizes<StaticSizeSeq, Ts...>;
+
+  template <size_t... StaticSizes>
+  using WithStaticSizes =
+      WithStaticSizeSequence<std::index_sequence<StaticSizes...>>;
+
+  // Inherit LayoutWithStaticSizes's constructor, which requires you to specify
+  // all the array sizes.
+  using Super::Super;
 };
 
 }  // namespace container_internal
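
Note: a small self-contained usage sketch of the WithStaticSizes alias introduced above. The element types (uint32_t header, double payload) and the static counts 1 and 8 are arbitrary choices for illustration; only the Layout/WithStaticSizes API itself comes from the patch.

    // Sketch: baking array sizes into the Layout type so offsets and
    // AllocSize() fold to compile-time constants.
    #include <cstdint>

    #include "absl/container/internal/layout.h"

    namespace ci = absl::container_internal;

    // uint32_t[1] header followed by double[8], both sizes known statically.
    using SL = ci::Layout<uint32_t, double>::WithStaticSizes<1, 8>;

    // Offset of the double array: 4 bytes of header aligned up to 8.
    static_assert(SL::Partial().Offset<1>() == 8, "");
    // Total allocation: the 8-byte prefix plus eight doubles.
    static_assert(SL().AllocSize() == 8 + 8 * sizeof(double), "");
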
diff --git a/absl/container/internal/layout_benchmark.cc b/absl/container/internal/layout_benchmark.cc
index 3af35e3..d6f2669 100644
--- a/absl/container/internal/layout_benchmark.cc
+++ b/absl/container/internal/layout_benchmark.cc
@@ -15,6 +15,9 @@
 // Every benchmark should have the same performance as the corresponding
 // headroom benchmark.
 
+#include <cstddef>
+#include <cstdint>
+
 #include "absl/base/internal/raw_logging.h"
 #include "absl/container/internal/layout.h"
 #include "benchmark/benchmark.h"
@@ -28,6 +31,8 @@
 
 using Int128 = int64_t[2];
 
+constexpr size_t MyAlign(size_t n, size_t m) { return (n + m - 1) & ~(m - 1); }
+
 // This benchmark provides the upper bound on performance for BM_OffsetConstant.
 template <size_t Offset, class... Ts>
 void BM_OffsetConstantHeadroom(benchmark::State& state) {
@@ -37,6 +42,15 @@
 }
 
 template <size_t Offset, class... Ts>
+void BM_OffsetConstantStatic(benchmark::State& state) {
+  using L = typename Layout<Ts...>::template WithStaticSizes<3, 5, 7>;
+  ABSL_RAW_CHECK(L::Partial().template Offset<3>() == Offset, "Invalid offset");
+  for (auto _ : state) {
+    DoNotOptimize(L::Partial().template Offset<3>());
+  }
+}
+
+template <size_t Offset, class... Ts>
 void BM_OffsetConstant(benchmark::State& state) {
   using L = Layout<Ts...>;
   ABSL_RAW_CHECK(L::Partial(3, 5, 7).template Offset<3>() == Offset,
@@ -46,14 +60,74 @@
   }
 }
 
+template <size_t Offset, class... Ts>
+void BM_OffsetConstantIndirect(benchmark::State& state) {
+  using L = Layout<Ts...>;
+  auto p = L::Partial(3, 5, 7);
+  ABSL_RAW_CHECK(p.template Offset<3>() == Offset, "Invalid offset");
+  for (auto _ : state) {
+    DoNotOptimize(p);
+    DoNotOptimize(p.template Offset<3>());
+  }
+}
+
+template <class... Ts>
+size_t PartialOffset(size_t k);
+
+template <>
+size_t PartialOffset<int8_t, int16_t, int32_t, Int128>(size_t k) {
+  constexpr size_t o = MyAlign(MyAlign(3 * 1, 2) + 5 * 2, 4);
+  return MyAlign(o + k * 4, 8);
+}
+
+template <>
+size_t PartialOffset<Int128, int32_t, int16_t, int8_t>(size_t k) {
+  // No alignment is necessary.
+  return 3 * 16 + 5 * 4 + k * 2;
+}
+
+// This benchmark provides the upper bound on performance for BM_OffsetVariable.
+template <size_t Offset, class... Ts>
+void BM_OffsetPartialHeadroom(benchmark::State& state) {
+  size_t k = 7;
+  ABSL_RAW_CHECK(PartialOffset<Ts...>(k) == Offset, "Invalid offset");
+  for (auto _ : state) {
+    DoNotOptimize(k);
+    DoNotOptimize(PartialOffset<Ts...>(k));
+  }
+}
+
+template <size_t Offset, class... Ts>
+void BM_OffsetPartialStatic(benchmark::State& state) {
+  using L = typename Layout<Ts...>::template WithStaticSizes<3, 5>;
+  size_t k = 7;
+  ABSL_RAW_CHECK(L::Partial(k).template Offset<3>() == Offset,
+                 "Invalid offset");
+  for (auto _ : state) {
+    DoNotOptimize(k);
+    DoNotOptimize(L::Partial(k).template Offset<3>());
+  }
+}
+
+template <size_t Offset, class... Ts>
+void BM_OffsetPartial(benchmark::State& state) {
+  using L = Layout<Ts...>;
+  size_t k = 7;
+  ABSL_RAW_CHECK(L::Partial(3, 5, k).template Offset<3>() == Offset,
+                 "Invalid offset");
+  for (auto _ : state) {
+    DoNotOptimize(k);
+    DoNotOptimize(L::Partial(3, 5, k).template Offset<3>());
+  }
+}
+
 template <class... Ts>
 size_t VariableOffset(size_t n, size_t m, size_t k);
 
 template <>
 size_t VariableOffset<int8_t, int16_t, int32_t, Int128>(size_t n, size_t m,
                                                         size_t k) {
-  auto Align = [](size_t n, size_t m) { return (n + m - 1) & ~(m - 1); };
-  return Align(Align(Align(n * 1, 2) + m * 2, 4) + k * 4, 8);
+  return MyAlign(MyAlign(MyAlign(n * 1, 2) + m * 2, 4) + k * 4, 8);
 }
 
 template <>
@@ -94,6 +168,75 @@
   }
 }
 
+template <class... Ts>
+size_t AllocSize(size_t x);
+
+template <>
+size_t AllocSize<int8_t, int16_t, int32_t, Int128>(size_t x) {
+  constexpr size_t o =
+      Layout<int8_t, int16_t, int32_t, Int128>::Partial(3, 5, 7)
+          .template Offset<Int128>();
+  return o + sizeof(Int128) * x;
+}
+
+template <>
+size_t AllocSize<Int128, int32_t, int16_t, int8_t>(size_t x) {
+  constexpr size_t o =
+      Layout<Int128, int32_t, int16_t, int8_t>::Partial(3, 5, 7)
+          .template Offset<int8_t>();
+  return o + sizeof(int8_t) * x;
+}
+
+// This benchmark provides the upper bound on performance for BM_AllocSize.
+template <size_t Size, class... Ts>
+void BM_AllocSizeHeadroom(benchmark::State& state) {
+  size_t x = 9;
+  ABSL_RAW_CHECK(AllocSize<Ts...>(x) == Size, "Invalid size");
+  for (auto _ : state) {
+    DoNotOptimize(x);
+    DoNotOptimize(AllocSize<Ts...>(x));
+  }
+}
+
+template <size_t Size, class... Ts>
+void BM_AllocSizeStatic(benchmark::State& state) {
+  using L = typename Layout<Ts...>::template WithStaticSizes<3, 5, 7>;
+  size_t x = 9;
+  ABSL_RAW_CHECK(L(x).AllocSize() == Size, "Invalid size");
+  for (auto _ : state) {
+    DoNotOptimize(x);
+    DoNotOptimize(L(x).AllocSize());
+  }
+}
+
+template <size_t Size, class... Ts>
+void BM_AllocSize(benchmark::State& state) {
+  using L = Layout<Ts...>;
+  size_t n = 3;
+  size_t m = 5;
+  size_t k = 7;
+  size_t x = 9;
+  ABSL_RAW_CHECK(L(n, m, k, x).AllocSize() == Size, "Invalid size");
+  for (auto _ : state) {
+    DoNotOptimize(n);
+    DoNotOptimize(m);
+    DoNotOptimize(k);
+    DoNotOptimize(x);
+    DoNotOptimize(L(n, m, k, x).AllocSize());
+  }
+}
+
+template <size_t Size, class... Ts>
+void BM_AllocSizeIndirect(benchmark::State& state) {
+  using L = Layout<Ts...>;
+  auto l = L(3, 5, 7, 9);
+  ABSL_RAW_CHECK(l.AllocSize() == Size, "Invalid size");
+  for (auto _ : state) {
+    DoNotOptimize(l);
+    DoNotOptimize(l.AllocSize());
+  }
+}
+
 // Run all benchmarks in two modes:
 //
 //   Layout with padding: int8_t[3], int16_t[5], int32_t[7], Int128[?].
@@ -106,16 +249,46 @@
 
 OFFSET_BENCHMARK(BM_OffsetConstantHeadroom, 48, int8_t, int16_t, int32_t,
                  Int128);
+OFFSET_BENCHMARK(BM_OffsetConstantStatic, 48, int8_t, int16_t, int32_t, Int128);
 OFFSET_BENCHMARK(BM_OffsetConstant, 48, int8_t, int16_t, int32_t, Int128);
+OFFSET_BENCHMARK(BM_OffsetConstantIndirect, 48, int8_t, int16_t, int32_t,
+                 Int128);
+
 OFFSET_BENCHMARK(BM_OffsetConstantHeadroom, 82, Int128, int32_t, int16_t,
                  int8_t);
+OFFSET_BENCHMARK(BM_OffsetConstantStatic, 82, Int128, int32_t, int16_t, int8_t);
 OFFSET_BENCHMARK(BM_OffsetConstant, 82, Int128, int32_t, int16_t, int8_t);
+OFFSET_BENCHMARK(BM_OffsetConstantIndirect, 82, Int128, int32_t, int16_t,
+                 int8_t);
+
+OFFSET_BENCHMARK(BM_OffsetPartialHeadroom, 48, int8_t, int16_t, int32_t,
+                 Int128);
+OFFSET_BENCHMARK(BM_OffsetPartialStatic, 48, int8_t, int16_t, int32_t, Int128);
+OFFSET_BENCHMARK(BM_OffsetPartial, 48, int8_t, int16_t, int32_t, Int128);
+
+OFFSET_BENCHMARK(BM_OffsetPartialHeadroom, 82, Int128, int32_t, int16_t,
+                 int8_t);
+OFFSET_BENCHMARK(BM_OffsetPartialStatic, 82, Int128, int32_t, int16_t, int8_t);
+OFFSET_BENCHMARK(BM_OffsetPartial, 82, Int128, int32_t, int16_t, int8_t);
+
 OFFSET_BENCHMARK(BM_OffsetVariableHeadroom, 48, int8_t, int16_t, int32_t,
                  Int128);
 OFFSET_BENCHMARK(BM_OffsetVariable, 48, int8_t, int16_t, int32_t, Int128);
+
 OFFSET_BENCHMARK(BM_OffsetVariableHeadroom, 82, Int128, int32_t, int16_t,
                  int8_t);
 OFFSET_BENCHMARK(BM_OffsetVariable, 82, Int128, int32_t, int16_t, int8_t);
+
+OFFSET_BENCHMARK(BM_AllocSizeHeadroom, 192, int8_t, int16_t, int32_t, Int128);
+OFFSET_BENCHMARK(BM_AllocSizeStatic, 192, int8_t, int16_t, int32_t, Int128);
+OFFSET_BENCHMARK(BM_AllocSize, 192, int8_t, int16_t, int32_t, Int128);
+OFFSET_BENCHMARK(BM_AllocSizeIndirect, 192, int8_t, int16_t, int32_t, Int128);
+
+OFFSET_BENCHMARK(BM_AllocSizeHeadroom, 91, Int128, int32_t, int16_t, int8_t);
+OFFSET_BENCHMARK(BM_AllocSizeStatic, 91, Int128, int32_t, int16_t, int8_t);
+OFFSET_BENCHMARK(BM_AllocSize, 91, Int128, int32_t, int16_t, int8_t);
+OFFSET_BENCHMARK(BM_AllocSizeIndirect, 91, Int128, int32_t, int16_t, int8_t);
+
 }  // namespace
 }  // namespace container_internal
 ABSL_NAMESPACE_END
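
Note: the expected constants used in the OFFSET_BENCHMARK registrations above can be checked by hand. The sketch below redoes the arithmetic under the benchmark's own assumptions (Int128 is int64_t[2], i.e. 16 bytes with 8-byte alignment); the Align helper is local to this sketch.

    // Sketch: verifying the benchmark's expected offsets and sizes by hand.
    #include <cstddef>

    constexpr size_t Align(size_t n, size_t m) { return (n + m - 1) & ~(m - 1); }

    // Offset of Int128[] after int8_t[3], int16_t[5], int32_t[7]:
    static_assert(Align(Align(Align(3 * 1, 2) + 5 * 2, 4) + 7 * 4, 8) == 48, "");
    // Offset of int8_t[] after Int128[3], int32_t[5], int16_t[7] (no padding):
    static_assert(3 * 16 + 5 * 4 + 7 * 2 == 82, "");
    // AllocSize with a trailing array of 9 elements in each ordering:
    static_assert(48 + 9 * 16 == 192, "");
    static_assert(82 + 9 * 1 == 91, "");
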
diff --git a/absl/container/internal/layout_test.cc b/absl/container/internal/layout_test.cc
index ae55cf7..47fc9f3 100644
--- a/absl/container/internal/layout_test.cc
+++ b/absl/container/internal/layout_test.cc
@@ -68,9 +68,7 @@
 // int64_t is *not* 8-byte aligned on all platforms!
 struct alignas(8) Int64 {
   int64_t a;
-  friend bool operator==(Int64 lhs, Int64 rhs) {
-    return lhs.a == rhs.a;
-  }
+  friend bool operator==(Int64 lhs, Int64 rhs) { return lhs.a == rhs.a; }
 };
 
 // Properties of types that this test relies on.
@@ -271,6 +269,35 @@
   }
 }
 
+TEST(Layout, StaticOffsets) {
+  using L = Layout<int8_t, int32_t, Int128>;
+  {
+    using SL = L::WithStaticSizes<>;
+    EXPECT_THAT(SL::Partial().Offsets(), ElementsAre(0));
+    EXPECT_THAT(SL::Partial(5).Offsets(), ElementsAre(0, 8));
+    EXPECT_THAT(SL::Partial(5, 3, 1).Offsets(), ElementsAre(0, 8, 24));
+    EXPECT_THAT(SL(5, 3, 1).Offsets(), ElementsAre(0, 8, 24));
+  }
+  {
+    using SL = L::WithStaticSizes<5>;
+    EXPECT_THAT(SL::Partial().Offsets(), ElementsAre(0, 8));
+    EXPECT_THAT(SL::Partial(3).Offsets(), ElementsAre(0, 8, 24));
+    EXPECT_THAT(SL::Partial(3, 1).Offsets(), ElementsAre(0, 8, 24));
+    EXPECT_THAT(SL(3, 1).Offsets(), ElementsAre(0, 8, 24));
+  }
+  {
+    using SL = L::WithStaticSizes<5, 3>;
+    EXPECT_THAT(SL::Partial().Offsets(), ElementsAre(0, 8, 24));
+    EXPECT_THAT(SL::Partial(1).Offsets(), ElementsAre(0, 8, 24));
+    EXPECT_THAT(SL(1).Offsets(), ElementsAre(0, 8, 24));
+  }
+  {
+    using SL = L::WithStaticSizes<5, 3, 1>;
+    EXPECT_THAT(SL::Partial().Offsets(), ElementsAre(0, 8, 24));
+    EXPECT_THAT(SL().Offsets(), ElementsAre(0, 8, 24));
+  }
+}
+
 TEST(Layout, AllocSize) {
   {
     using L = Layout<int32_t>;
@@ -295,6 +322,30 @@
   }
 }
 
+TEST(Layout, StaticAllocSize) {
+  using L = Layout<int8_t, int32_t, Int128>;
+  {
+    using SL = L::WithStaticSizes<>;
+    EXPECT_EQ(136, SL::Partial(3, 5, 7).AllocSize());
+    EXPECT_EQ(136, SL(3, 5, 7).AllocSize());
+  }
+  {
+    using SL = L::WithStaticSizes<3>;
+    EXPECT_EQ(136, SL::Partial(5, 7).AllocSize());
+    EXPECT_EQ(136, SL(5, 7).AllocSize());
+  }
+  {
+    using SL = L::WithStaticSizes<3, 5>;
+    EXPECT_EQ(136, SL::Partial(7).AllocSize());
+    EXPECT_EQ(136, SL(7).AllocSize());
+  }
+  {
+    using SL = L::WithStaticSizes<3, 5, 7>;
+    EXPECT_EQ(136, SL::Partial().AllocSize());
+    EXPECT_EQ(136, SL().AllocSize());
+  }
+}
+
 TEST(Layout, SizeByIndex) {
   {
     using L = Layout<int32_t>;
@@ -370,6 +421,27 @@
   }
 }
 
+TEST(Layout, StaticSize) {
+  using L = Layout<int8_t, int32_t, Int128>;
+  {
+    using SL = L::WithStaticSizes<>;
+    EXPECT_THAT(SL::Partial().Sizes(), ElementsAre());
+    EXPECT_THAT(SL::Partial(3).Size<0>(), 3);
+    EXPECT_THAT(SL::Partial(3).Size<int8_t>(), 3);
+    EXPECT_THAT(SL::Partial(3).Sizes(), ElementsAre(3));
+    EXPECT_THAT(SL::Partial(3, 5, 7).Size<0>(), 3);
+    EXPECT_THAT(SL::Partial(3, 5, 7).Size<int8_t>(), 3);
+    EXPECT_THAT(SL::Partial(3, 5, 7).Size<2>(), 7);
+    EXPECT_THAT(SL::Partial(3, 5, 7).Size<Int128>(), 7);
+    EXPECT_THAT(SL::Partial(3, 5, 7).Sizes(), ElementsAre(3, 5, 7));
+    EXPECT_THAT(SL(3, 5, 7).Size<0>(), 3);
+    EXPECT_THAT(SL(3, 5, 7).Size<int8_t>(), 3);
+    EXPECT_THAT(SL(3, 5, 7).Size<2>(), 7);
+    EXPECT_THAT(SL(3, 5, 7).Size<Int128>(), 7);
+    EXPECT_THAT(SL(3, 5, 7).Sizes(), ElementsAre(3, 5, 7));
+  }
+}
+
 TEST(Layout, PointerByIndex) {
   alignas(max_align_t) const unsigned char p[100] = {0};
   {
@@ -720,6 +792,61 @@
   }
 }
 
+TEST(Layout, StaticPointers) {
+  alignas(max_align_t) const unsigned char p[100] = {0};
+  using L = Layout<int8_t, int8_t, Int128>;
+  {
+    const auto x = L::WithStaticSizes<>::Partial();
+    EXPECT_EQ(std::make_tuple(x.Pointer<0>(p)),
+              Type<std::tuple<const int8_t*>>(x.Pointers(p)));
+  }
+  {
+    const auto x = L::WithStaticSizes<>::Partial(1);
+    EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)),
+              (Type<std::tuple<const int8_t*, const int8_t*>>(x.Pointers(p))));
+  }
+  {
+    const auto x = L::WithStaticSizes<1>::Partial();
+    EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)),
+              (Type<std::tuple<const int8_t*, const int8_t*>>(x.Pointers(p))));
+  }
+  {
+    const auto x = L::WithStaticSizes<>::Partial(1, 2, 3);
+    EXPECT_EQ(
+        std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
+        (Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
+            x.Pointers(p))));
+  }
+  {
+    const auto x = L::WithStaticSizes<1>::Partial(2, 3);
+    EXPECT_EQ(
+        std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
+        (Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
+            x.Pointers(p))));
+  }
+  {
+    const auto x = L::WithStaticSizes<1, 2>::Partial(3);
+    EXPECT_EQ(
+        std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
+        (Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
+            x.Pointers(p))));
+  }
+  {
+    const auto x = L::WithStaticSizes<1, 2, 3>::Partial();
+    EXPECT_EQ(
+        std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
+        (Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
+            x.Pointers(p))));
+  }
+  {
+    const L::WithStaticSizes<1, 2, 3> x;
+    EXPECT_EQ(
+        std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
+        (Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
+            x.Pointers(p))));
+  }
+}
+
 TEST(Layout, SliceByIndexSize) {
   alignas(max_align_t) const unsigned char p[100] = {0};
   {
@@ -769,7 +896,6 @@
     EXPECT_EQ(7, L(3, 5, 7).Slice<Int128>(p).size());
   }
 }
-
 TEST(Layout, MutableSliceByIndexSize) {
   alignas(max_align_t) unsigned char p[100] = {0};
   {
@@ -820,6 +946,39 @@
   }
 }
 
+TEST(Layout, StaticSliceSize) {
+  alignas(max_align_t) const unsigned char cp[100] = {0};
+  alignas(max_align_t) unsigned char p[100] = {0};
+  using L = Layout<int8_t, int32_t, Int128>;
+  using SL = L::WithStaticSizes<3, 5>;
+
+  EXPECT_EQ(3, SL::Partial().Slice<0>(cp).size());
+  EXPECT_EQ(3, SL::Partial().Slice<int8_t>(cp).size());
+  EXPECT_EQ(3, SL::Partial(7).Slice<0>(cp).size());
+  EXPECT_EQ(3, SL::Partial(7).Slice<int8_t>(cp).size());
+
+  EXPECT_EQ(5, SL::Partial().Slice<1>(cp).size());
+  EXPECT_EQ(5, SL::Partial().Slice<int32_t>(cp).size());
+  EXPECT_EQ(5, SL::Partial(7).Slice<1>(cp).size());
+  EXPECT_EQ(5, SL::Partial(7).Slice<int32_t>(cp).size());
+
+  EXPECT_EQ(7, SL::Partial(7).Slice<2>(cp).size());
+  EXPECT_EQ(7, SL::Partial(7).Slice<Int128>(cp).size());
+
+  EXPECT_EQ(3, SL::Partial().Slice<0>(p).size());
+  EXPECT_EQ(3, SL::Partial().Slice<int8_t>(p).size());
+  EXPECT_EQ(3, SL::Partial(7).Slice<0>(p).size());
+  EXPECT_EQ(3, SL::Partial(7).Slice<int8_t>(p).size());
+
+  EXPECT_EQ(5, SL::Partial().Slice<1>(p).size());
+  EXPECT_EQ(5, SL::Partial().Slice<int32_t>(p).size());
+  EXPECT_EQ(5, SL::Partial(7).Slice<1>(p).size());
+  EXPECT_EQ(5, SL::Partial(7).Slice<int32_t>(p).size());
+
+  EXPECT_EQ(7, SL::Partial(7).Slice<2>(p).size());
+  EXPECT_EQ(7, SL::Partial(7).Slice<Int128>(p).size());
+}
+
 TEST(Layout, SliceByIndexData) {
   alignas(max_align_t) const unsigned char p[100] = {0};
   {
@@ -1230,6 +1389,39 @@
   }
 }
 
+TEST(Layout, StaticSliceData) {
+  alignas(max_align_t) const unsigned char cp[100] = {0};
+  alignas(max_align_t) unsigned char p[100] = {0};
+  using L = Layout<int8_t, int32_t, Int128>;
+  using SL = L::WithStaticSizes<3, 5>;
+
+  EXPECT_EQ(0, Distance(cp, SL::Partial().Slice<0>(cp).data()));
+  EXPECT_EQ(0, Distance(cp, SL::Partial().Slice<int8_t>(cp).data()));
+  EXPECT_EQ(0, Distance(cp, SL::Partial(7).Slice<0>(cp).data()));
+  EXPECT_EQ(0, Distance(cp, SL::Partial(7).Slice<int8_t>(cp).data()));
+
+  EXPECT_EQ(4, Distance(cp, SL::Partial().Slice<1>(cp).data()));
+  EXPECT_EQ(4, Distance(cp, SL::Partial().Slice<int32_t>(cp).data()));
+  EXPECT_EQ(4, Distance(cp, SL::Partial(7).Slice<1>(cp).data()));
+  EXPECT_EQ(4, Distance(cp, SL::Partial(7).Slice<int32_t>(cp).data()));
+
+  EXPECT_EQ(24, Distance(cp, SL::Partial(7).Slice<2>(cp).data()));
+  EXPECT_EQ(24, Distance(cp, SL::Partial(7).Slice<Int128>(cp).data()));
+
+  EXPECT_EQ(0, Distance(p, SL::Partial().Slice<0>(p).data()));
+  EXPECT_EQ(0, Distance(p, SL::Partial().Slice<int8_t>(p).data()));
+  EXPECT_EQ(0, Distance(p, SL::Partial(7).Slice<0>(p).data()));
+  EXPECT_EQ(0, Distance(p, SL::Partial(7).Slice<int8_t>(p).data()));
+
+  EXPECT_EQ(4, Distance(p, SL::Partial().Slice<1>(p).data()));
+  EXPECT_EQ(4, Distance(p, SL::Partial().Slice<int32_t>(p).data()));
+  EXPECT_EQ(4, Distance(p, SL::Partial(7).Slice<1>(p).data()));
+  EXPECT_EQ(4, Distance(p, SL::Partial(7).Slice<int32_t>(p).data()));
+
+  EXPECT_EQ(24, Distance(p, SL::Partial(7).Slice<2>(p).data()));
+  EXPECT_EQ(24, Distance(p, SL::Partial(7).Slice<Int128>(p).data()));
+}
+
 MATCHER_P(IsSameSlice, slice, "") {
   return arg.size() == slice.size() && arg.data() == slice.data();
 }
@@ -1339,6 +1531,43 @@
   }
 }
 
+TEST(Layout, StaticSlices) {
+  alignas(max_align_t) const unsigned char cp[100] = {0};
+  alignas(max_align_t) unsigned char p[100] = {0};
+  using SL = Layout<int8_t, int8_t, Int128>::WithStaticSizes<1, 2>;
+  {
+    const auto x = SL::Partial();
+    EXPECT_THAT(
+        (Type<std::tuple<Span<const int8_t>, Span<const int8_t>>>(
+            x.Slices(cp))),
+        Tuple(IsSameSlice(x.Slice<0>(cp)), IsSameSlice(x.Slice<1>(cp))));
+    EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>>>(x.Slices(p))),
+                Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p))));
+  }
+  {
+    const auto x = SL::Partial(3);
+    EXPECT_THAT((Type<std::tuple<Span<const int8_t>, Span<const int8_t>,
+                                 Span<const Int128>>>(x.Slices(cp))),
+                Tuple(IsSameSlice(x.Slice<0>(cp)), IsSameSlice(x.Slice<1>(cp)),
+                      IsSameSlice(x.Slice<2>(cp))));
+    EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(
+                    x.Slices(p))),
+                Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
+                      IsSameSlice(x.Slice<2>(p))));
+  }
+  {
+    const SL x(3);
+    EXPECT_THAT((Type<std::tuple<Span<const int8_t>, Span<const int8_t>,
+                                 Span<const Int128>>>(x.Slices(cp))),
+                Tuple(IsSameSlice(x.Slice<0>(cp)), IsSameSlice(x.Slice<1>(cp)),
+                      IsSameSlice(x.Slice<2>(cp))));
+    EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(
+                    x.Slices(p))),
+                Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
+                      IsSameSlice(x.Slice<2>(p))));
+  }
+}
+
 TEST(Layout, UnalignedTypes) {
   constexpr Layout<unsigned char, unsigned char, unsigned char> x(1, 2, 3);
   alignas(max_align_t) unsigned char p[x.AllocSize() + 1];
@@ -1377,6 +1606,36 @@
   static_assert(Layout<int32_t, Int64, int8_t>::Alignment() == 8, "");
   static_assert(Layout<Int64, int8_t, int32_t>::Alignment() == 8, "");
   static_assert(Layout<Int64, int32_t, int8_t>::Alignment() == 8, "");
+  static_assert(
+      Layout<Aligned<int8_t, 64>>::WithStaticSizes<>::Alignment() == 64, "");
+  static_assert(
+      Layout<Aligned<int8_t, 64>>::WithStaticSizes<2>::Alignment() == 64, "");
+}
+
+TEST(Layout, StaticAlignment) {
+  static_assert(Layout<int8_t>::WithStaticSizes<>::Alignment() == 1, "");
+  static_assert(Layout<int8_t>::WithStaticSizes<0>::Alignment() == 1, "");
+  static_assert(Layout<int8_t>::WithStaticSizes<7>::Alignment() == 1, "");
+  static_assert(Layout<int32_t>::WithStaticSizes<>::Alignment() == 4, "");
+  static_assert(Layout<int32_t>::WithStaticSizes<0>::Alignment() == 4, "");
+  static_assert(Layout<int32_t>::WithStaticSizes<3>::Alignment() == 4, "");
+  static_assert(
+      Layout<Aligned<int8_t, 64>>::WithStaticSizes<>::Alignment() == 64, "");
+  static_assert(
+      Layout<Aligned<int8_t, 64>>::WithStaticSizes<0>::Alignment() == 64, "");
+  static_assert(
+      Layout<Aligned<int8_t, 64>>::WithStaticSizes<2>::Alignment() == 64, "");
+  static_assert(
+      Layout<int32_t, Int64, int8_t>::WithStaticSizes<>::Alignment() == 8, "");
+  static_assert(
+      Layout<int32_t, Int64, int8_t>::WithStaticSizes<0, 0, 0>::Alignment() ==
+          8,
+      "");
+  static_assert(
+      Layout<int32_t, Int64, int8_t>::WithStaticSizes<1, 1, 1>::Alignment() ==
+          8,
+      "");
 }
 
 TEST(Layout, ConstexprPartial) {
@@ -1384,6 +1643,15 @@
   constexpr Layout<unsigned char, Aligned<unsigned char, 2 * M>> x(1, 3);
   static_assert(x.Partial(1).template Offset<1>() == 2 * M, "");
 }
+
+TEST(Layout, StaticConstexpr) {
+  constexpr size_t M = alignof(max_align_t);
+  using L = Layout<unsigned char, Aligned<unsigned char, 2 * M>>;
+  using SL = L::WithStaticSizes<1, 3>;
+  constexpr SL x;
+  static_assert(x.Offset<1>() == 2 * M, "");
+}
+
 // [from, to)
 struct Region {
   size_t from;
@@ -1458,6 +1726,41 @@
   }
 }
 
+TEST(Layout, StaticPoisonPadding) {
+  using L = Layout<int8_t, Int64, int32_t, Int128>;
+  using SL = L::WithStaticSizes<1, 2>;
+
+  constexpr size_t n = L::Partial(1, 2, 3, 4).AllocSize();
+  {
+    constexpr auto x = SL::Partial();
+    alignas(max_align_t) const unsigned char c[n] = {};
+    x.PoisonPadding(c);
+    EXPECT_EQ(x.Slices(c), x.Slices(c));
+    ExpectPoisoned(c, {{1, 8}});
+  }
+  {
+    constexpr auto x = SL::Partial(3);
+    alignas(max_align_t) const unsigned char c[n] = {};
+    x.PoisonPadding(c);
+    EXPECT_EQ(x.Slices(c), x.Slices(c));
+    ExpectPoisoned(c, {{1, 8}, {36, 40}});
+  }
+  {
+    constexpr auto x = SL::Partial(3, 4);
+    alignas(max_align_t) const unsigned char c[n] = {};
+    x.PoisonPadding(c);
+    EXPECT_EQ(x.Slices(c), x.Slices(c));
+    ExpectPoisoned(c, {{1, 8}, {36, 40}});
+  }
+  {
+    constexpr SL x(3, 4);
+    alignas(max_align_t) const unsigned char c[n] = {};
+    x.PoisonPadding(c);
+    EXPECT_EQ(x.Slices(c), x.Slices(c));
+    ExpectPoisoned(c, {{1, 8}, {36, 40}});
+  }
+}
+
 TEST(Layout, DebugString) {
   {
     constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial();
@@ -1500,6 +1803,62 @@
   }
 }
 
+TEST(Layout, StaticDebugString) {
+  {
+    constexpr auto x =
+        Layout<int8_t, int32_t, int8_t, Int128>::WithStaticSizes<>::Partial();
+    EXPECT_EQ("@0<signed char>(1)", x.DebugString());
+  }
+  {
+    constexpr auto x =
+        Layout<int8_t, int32_t, int8_t, Int128>::WithStaticSizes<>::Partial(1);
+    EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)", x.DebugString());
+  }
+  {
+    constexpr auto x =
+        Layout<int8_t, int32_t, int8_t, Int128>::WithStaticSizes<1>::Partial();
+    EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)", x.DebugString());
+  }
+  {
+    constexpr auto x =
+        Layout<int8_t, int32_t, int8_t, Int128>::WithStaticSizes<>::Partial(1,
+                                                                            2);
+    EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)",
+              x.DebugString());
+  }
+  {
+    constexpr auto x =
+        Layout<int8_t, int32_t, int8_t, Int128>::WithStaticSizes<1>::Partial(2);
+    EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)",
+              x.DebugString());
+  }
+  {
+    constexpr auto x = Layout<int8_t, int32_t, int8_t,
+                              Int128>::WithStaticSizes<1, 2>::Partial();
+    EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)",
+              x.DebugString());
+  }
+  {
+    constexpr auto x = Layout<int8_t, int32_t, int8_t,
+                              Int128>::WithStaticSizes<1, 2, 3, 4>::Partial();
+    EXPECT_EQ(
+        "@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
+        "@16" +
+            Int128::Name() + "(16)[4]",
+        x.DebugString());
+  }
+  {
+    constexpr Layout<int8_t, int32_t, int8_t, Int128>::WithStaticSizes<1, 2, 3,
+                                                                       4>
+        x;
+    EXPECT_EQ(
+        "@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
+        "@16" +
+            Int128::Name() + "(16)[4]",
+        x.DebugString());
+  }
+}
+
 TEST(Layout, CharTypes) {
   constexpr Layout<int32_t> x(1);
   alignas(max_align_t) char c[x.AllocSize()] = {};
@@ -1638,6 +1997,35 @@
   EXPECT_STREQ("hello", s.c_str());
 }
 
+// Same as the previous CompactString example, except we set the first array
+// size to 1 statically, since we know it is always 1. This allows us to compute
+// the offset of the character array at compile time.
+class StaticCompactString {
+ public:
+  StaticCompactString(const char* s = "") {  // NOLINT
+    const size_t size = strlen(s);
+    const SL layout(size + 1);
+    p_.reset(new unsigned char[layout.AllocSize()]);
+    layout.PoisonPadding(p_.get());
+    *layout.Pointer<size_t>(p_.get()) = size;
+    memcpy(layout.Pointer<char>(p_.get()), s, size + 1);
+  }
+
+  size_t size() const { return *SL::Partial().Pointer<size_t>(p_.get()); }
+
+  const char* c_str() const { return SL::Partial().Pointer<char>(p_.get()); }
+
+ private:
+  using SL = Layout<size_t, char>::WithStaticSizes<1>;
+  std::unique_ptr<unsigned char[]> p_;
+};
+
+TEST(StaticCompactString, Works) {
+  StaticCompactString s = "hello";
+  EXPECT_EQ(5, s.size());
+  EXPECT_STREQ("hello", s.c_str());
+}
+
 }  // namespace example
 
 }  // namespace
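The tests above exercise the new `Layout<...>::WithStaticSizes<...>` alias, whose benefit is that offsets of trailing arrays become compile-time constants once the sizes of the preceding arrays are static. A minimal standalone sketch of that property, mirroring the StaticCompactString example (it assumes the internal header "absl/container/internal/layout.h" and that `Partial()`/`Offset()` are constexpr here, as the StaticConstexpr and StaticDebugString tests suggest):

#include <cstddef>

#include "absl/container/internal/layout.h"

namespace {
// With the size of the leading size_t array fixed to 1, the offset of the
// char array is known at compile time: one size_t, with no extra padding
// because alignof(char) == 1.
using SL = absl::container_internal::Layout<size_t, char>::WithStaticSizes<1>;
static_assert(SL::Partial().Offset<1>() == sizeof(size_t),
              "char array should start right after the single size_t");
}  // namespace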
diff --git a/absl/container/internal/raw_hash_map.h b/absl/container/internal/raw_hash_map.h
index 97182bc..464bf23 100644
--- a/absl/container/internal/raw_hash_map.h
+++ b/absl/container/internal/raw_hash_map.h
@@ -198,22 +198,24 @@
   std::pair<iterator, bool> insert_or_assign_impl(K&& k, V&& v)
       ABSL_ATTRIBUTE_LIFETIME_BOUND {
     auto res = this->find_or_prepare_insert(k);
-    if (res.second)
+    if (res.second) {
       this->emplace_at(res.first, std::forward<K>(k), std::forward<V>(v));
-    else
-      Policy::value(&*this->iterator_at(res.first)) = std::forward<V>(v);
-    return {this->iterator_at(res.first), res.second};
+    } else {
+      Policy::value(&*res.first) = std::forward<V>(v);
+    }
+    return res;
   }
 
   template <class K = key_type, class... Args>
   std::pair<iterator, bool> try_emplace_impl(K&& k, Args&&... args)
       ABSL_ATTRIBUTE_LIFETIME_BOUND {
     auto res = this->find_or_prepare_insert(k);
-    if (res.second)
+    if (res.second) {
       this->emplace_at(res.first, std::piecewise_construct,
                        std::forward_as_tuple(std::forward<K>(k)),
                        std::forward_as_tuple(std::forward<Args>(args)...));
-    return {this->iterator_at(res.first), res.second};
+    }
+    return res;
   }
 };
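The simplification above makes the `*_impl` helpers return the `find_or_prepare_insert` result directly instead of rebuilding the iterator. The user-visible behavior they back is unchanged; for reference, a small self-contained usage sketch with absl::flat_hash_map (nothing here is specific to this change):

#include <cassert>
#include <string>

#include "absl/container/flat_hash_map.h"

int main() {
  absl::flat_hash_map<std::string, int> m;

  // insert_or_assign: inserts when the key is absent, overwrites when present.
  auto [it1, inserted1] = m.insert_or_assign("k", 1);
  auto [it2, inserted2] = m.insert_or_assign("k", 2);
  assert(inserted1 && !inserted2 && it2->second == 2);

  // try_emplace: constructs in place only when the key is absent.
  auto [it3, inserted3] = m.try_emplace("k", 3);
  assert(!inserted3 && it3->second == 2);
  return 0;
}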
 
diff --git a/absl/container/internal/raw_hash_set.cc b/absl/container/internal/raw_hash_set.cc
index 9f8ea51..1cae038 100644
--- a/absl/container/internal/raw_hash_set.cc
+++ b/absl/container/internal/raw_hash_set.cc
@@ -23,19 +23,24 @@
 #include "absl/base/attributes.h"
 #include "absl/base/config.h"
 #include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/endian.h"
+#include "absl/base/optimization.h"
 #include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/hashtablez_sampler.h"
 #include "absl/hash/hash.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace container_internal {
 
-// We have space for `growth_left` before a single block of control bytes. A
+// Represents a control byte corresponding to a full slot with arbitrary hash.
+constexpr ctrl_t ZeroCtrlT() { return static_cast<ctrl_t>(0); }
+
+// We have space for `growth_info` before a single block of control bytes. A
 // single block of empty control bytes for tables without any slots allocated.
 // This enables removing a branch in the hot path of find(). In order to ensure
 // that the control bytes are aligned to 16, we have 16 bytes before the control
-// bytes even though growth_left only needs 8.
-constexpr ctrl_t ZeroCtrlT() { return static_cast<ctrl_t>(0); }
+// bytes even though growth_info only needs 8.
 alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[32] = {
     ZeroCtrlT(),       ZeroCtrlT(),    ZeroCtrlT(),    ZeroCtrlT(),
     ZeroCtrlT(),       ZeroCtrlT(),    ZeroCtrlT(),    ZeroCtrlT(),
@@ -46,6 +51,18 @@
     ctrl_t::kEmpty,    ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
     ctrl_t::kEmpty,    ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty};
 
+// We need one full byte followed by a sentinel byte for iterator::operator++ to
+// work. We have a full group after kSentinel to be safe (in case operator++ is
+// changed to read a full group).
+ABSL_CONST_INIT ABSL_DLL const ctrl_t kSooControl[17] = {
+    ZeroCtrlT(),    ctrl_t::kSentinel, ZeroCtrlT(),    ctrl_t::kEmpty,
+    ctrl_t::kEmpty, ctrl_t::kEmpty,    ctrl_t::kEmpty, ctrl_t::kEmpty,
+    ctrl_t::kEmpty, ctrl_t::kEmpty,    ctrl_t::kEmpty, ctrl_t::kEmpty,
+    ctrl_t::kEmpty, ctrl_t::kEmpty,    ctrl_t::kEmpty, ctrl_t::kEmpty,
+    ctrl_t::kEmpty};
+static_assert(NumControlBytes(SooCapacity()) <= 17,
+              "kSooControl capacity too small");
+
 #ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
 constexpr size_t Group::kWidth;
 #endif
@@ -104,10 +121,25 @@
   return ShouldRehashForBugDetection(ctrl, capacity);
 }
 
-bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl) {
+bool ShouldInsertBackwardsForDebug(size_t capacity, size_t hash,
+                                   const ctrl_t* ctrl) {
   // To avoid problems with weak hashes and single bit tests, we use % 13.
   // TODO(kfm,sbenza): revisit after we do unconditional mixing
-  return (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6;
+  return !is_small(capacity) && (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6;
+}
+
+size_t PrepareInsertAfterSoo(size_t hash, size_t slot_size,
+                             CommonFields& common) {
+  assert(common.capacity() == NextCapacity(SooCapacity()));
+  // After resize from capacity 1 to 3, we always have exactly the slot with
+  // index 1 occupied, so we need to insert either at index 0 or index 2.
+  assert(HashSetResizeHelper::SooSlotIndex() == 1);
+  PrepareInsertCommon(common);
+  const size_t offset = H1(hash, common.control()) & 2;
+  common.growth_info().OverwriteEmptyAsFull();
+  SetCtrlInSingleGroupTable(common, offset, H2(hash), slot_size);
+  common.infoz().RecordInsert(hash, /*distance_from_desired=*/0);
+  return offset;
 }
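The `H1(hash, common.control()) & 2` step in PrepareInsertAfterSoo picks the target slot: after the SOO element lands in slot index 1 of the capacity-3 table, masking the hash with 2 can only yield offset 0 or 2, i.e. one of the two remaining free slots. A trivial standalone check of that arithmetic:

#include <cassert>
#include <cstddef>

int main() {
  for (size_t h1 = 0; h1 < 64; ++h1) {
    const size_t offset = h1 & 2;  // mirrors `H1(hash, ctrl) & 2`
    assert(offset == 0 || offset == 2);
  }
  return 0;
}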
 
 void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) {
@@ -128,6 +160,8 @@
   return find_first_non_full(common, hash);
 }
 
+namespace {
+
 // Returns the address of the slot just after slot assuming each slot has the
 // specified size.
 static inline void* NextSlot(void* slot, size_t slot_size) {
@@ -140,8 +174,22 @@
   return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot) - slot_size);
 }
 
+// Finds an empty slot (guaranteed to exist) starting from the given position.
+// NOTE: this function is almost never triggered inside
+// DropDeletesWithoutResize, so we keep it simple.
+// The table is rather sparse, so an empty slot will be found very quickly.
+size_t FindEmptySlot(size_t start, size_t end, const ctrl_t* ctrl) {
+  for (size_t i = start; i < end; ++i) {
+    if (IsEmpty(ctrl[i])) {
+      return i;
+    }
+  }
+  assert(false && "no empty slot");
+  return ~size_t{};
+}
+
 void DropDeletesWithoutResize(CommonFields& common,
-                              const PolicyFunctions& policy, void* tmp_space) {
+                              const PolicyFunctions& policy) {
   void* set = &common;
   void* slot_array = common.slot_array();
   const size_t capacity = common.capacity();
@@ -165,17 +213,28 @@
   //       repeat procedure for current slot with moved from element (target)
   ctrl_t* ctrl = common.control();
   ConvertDeletedToEmptyAndFullToDeleted(ctrl, capacity);
+  const void* hash_fn = policy.hash_fn(common);
   auto hasher = policy.hash_slot;
   auto transfer = policy.transfer;
   const size_t slot_size = policy.slot_size;
 
   size_t total_probe_length = 0;
   void* slot_ptr = SlotAddress(slot_array, 0, slot_size);
+
+  // The index of an empty slot that can be used as temporary memory for
+  // the swap operation.
+  constexpr size_t kUnknownId = ~size_t{};
+  size_t tmp_space_id = kUnknownId;
+
   for (size_t i = 0; i != capacity;
        ++i, slot_ptr = NextSlot(slot_ptr, slot_size)) {
     assert(slot_ptr == SlotAddress(slot_array, i, slot_size));
+    if (IsEmpty(ctrl[i])) {
+      tmp_space_id = i;
+      continue;
+    }
     if (!IsDeleted(ctrl[i])) continue;
-    const size_t hash = (*hasher)(set, slot_ptr);
+    const size_t hash = (*hasher)(hash_fn, slot_ptr);
     const FindInfo target = find_first_non_full(common, hash);
     const size_t new_i = target.offset;
     total_probe_length += target.probe_length;
@@ -202,16 +261,26 @@
       SetCtrl(common, new_i, H2(hash), slot_size);
       (*transfer)(set, new_slot_ptr, slot_ptr);
       SetCtrl(common, i, ctrl_t::kEmpty, slot_size);
+      // Initialize or change empty space id.
+      tmp_space_id = i;
     } else {
       assert(IsDeleted(ctrl[new_i]));
       SetCtrl(common, new_i, H2(hash), slot_size);
       // Until we are done rehashing, DELETED marks previously FULL slots.
 
+      if (tmp_space_id == kUnknownId) {
+        tmp_space_id = FindEmptySlot(i + 1, capacity, ctrl);
+      }
+      void* tmp_space = SlotAddress(slot_array, tmp_space_id, slot_size);
+      SanitizerUnpoisonMemoryRegion(tmp_space, slot_size);
+
       // Swap i and new_i elements.
       (*transfer)(set, tmp_space, new_slot_ptr);
       (*transfer)(set, new_slot_ptr, slot_ptr);
       (*transfer)(set, slot_ptr, tmp_space);
 
+      SanitizerPoisonMemoryRegion(tmp_space, slot_size);
+
       // repeat the processing of the ith slot
       --i;
       slot_ptr = PrevSlot(slot_ptr, slot_size);
@@ -238,6 +307,8 @@
              Group::kWidth;
 }
 
+}  // namespace
+
 void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size) {
   assert(IsFull(c.control()[index]) && "erasing a dangling iterator");
   c.decrement_size();
@@ -245,17 +316,19 @@
 
   if (WasNeverFull(c, index)) {
     SetCtrl(c, index, ctrl_t::kEmpty, slot_size);
-    c.set_growth_left(c.growth_left() + 1);
+    c.growth_info().OverwriteFullAsEmpty();
     return;
   }
 
+  c.growth_info().OverwriteFullAsDeleted();
   SetCtrl(c, index, ctrl_t::kDeleted, slot_size);
 }
 
 void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
-                       bool reuse) {
+                       bool reuse, bool soo_enabled) {
   c.set_size(0);
   if (reuse) {
+    assert(!soo_enabled || c.capacity() > SooCapacity());
     ResetCtrl(c, policy.slot_size);
     ResetGrowthLeft(c);
     c.infoz().RecordStorageChanged(0, c.capacity());
@@ -263,118 +336,308 @@
     // We need to record infoz before calling dealloc, which will unregister
     // infoz.
     c.infoz().RecordClearedReservation();
-    c.infoz().RecordStorageChanged(0, 0);
+    c.infoz().RecordStorageChanged(0, soo_enabled ? SooCapacity() : 0);
     (*policy.dealloc)(c, policy);
-    c.set_control(EmptyGroup());
-    c.set_generation_ptr(EmptyGeneration());
-    c.set_slots(nullptr);
-    c.set_capacity(0);
+    c = soo_enabled ? CommonFields{soo_tag_t{}} : CommonFields{};
   }
 }
 
 void HashSetResizeHelper::GrowIntoSingleGroupShuffleControlBytes(
-    ctrl_t* new_ctrl, size_t new_capacity) const {
+    ctrl_t* __restrict new_ctrl, size_t new_capacity) const {
   assert(is_single_group(new_capacity));
   constexpr size_t kHalfWidth = Group::kWidth / 2;
+  constexpr size_t kQuarterWidth = Group::kWidth / 4;
   assert(old_capacity_ < kHalfWidth);
+  static_assert(sizeof(uint64_t) >= kHalfWidth,
+                "Group size is too large. The ctrl bytes for half a group must "
+                "fit into a uint64_t for this implementation.");
+  static_assert(sizeof(uint64_t) <= Group::kWidth,
+                "Group size is too small. The ctrl bytes for a group must "
+                "cover a uint64_t for this implementation.");
 
   const size_t half_old_capacity = old_capacity_ / 2;
 
   // NOTE: operations are done with compile time known size = kHalfWidth.
   // Compiler optimizes that into single ASM operation.
 
-  // Copy second half of bytes to the beginning.
-  // We potentially copy more bytes in order to have compile time known size.
-  // Mirrored bytes from the old_ctrl_ will also be copied.
-  // In case of old_capacity_ == 3, we will copy 1st element twice.
+  // Load the bytes from half_old_capacity + 1. This contains the last half of
+  // old_ctrl bytes, followed by the sentinel byte, and then the first half of
+  // the cloned bytes. This effectively shuffles the control bytes.
+  uint64_t copied_bytes = 0;
+  copied_bytes =
+      absl::little_endian::Load64(old_ctrl() + half_old_capacity + 1);
+
+  // We change the sentinel byte to kEmpty before storing to both the start of
+  // the new_ctrl, and past the end of the new_ctrl later for the new cloned
+  // bytes. Note that this is faster than setting the sentinel byte to kEmpty
+  // after the copy directly in new_ctrl because we are limited on store
+  // bandwidth.
+  constexpr uint64_t kEmptyXorSentinel =
+      static_cast<uint8_t>(ctrl_t::kEmpty) ^
+      static_cast<uint8_t>(ctrl_t::kSentinel);
+  const uint64_t mask_convert_old_sentinel_to_empty =
+      kEmptyXorSentinel << (half_old_capacity * 8);
+  copied_bytes ^= mask_convert_old_sentinel_to_empty;
+
+  // Copy second half of bytes to the beginning. This correctly sets the bytes
+  // [0, old_capacity]. We potentially copy more bytes in order to have compile
+  // time known size. Mirrored bytes from the old_ctrl() will also be copied. In
+  // case of old_capacity_ == 3, we will copy 1st element twice.
   // Examples:
+  // (old capacity = 1)
   // old_ctrl = 0S0EEEEEEE...
-  // new_ctrl = S0EEEEEEEE...
+  // new_ctrl = E0EEEEEE??...
   //
-  // old_ctrl = 01S01EEEEE...
-  // new_ctrl = 1S01EEEEEE...
+  // (old capacity = 3)
+  // old_ctrl = 012S012EEEEE...
+  // new_ctrl = 12E012EE????...
   //
+  // (old capacity = 7)
   // old_ctrl = 0123456S0123456EE...
-  // new_ctrl = 456S0123?????????...
-  std::memcpy(new_ctrl, old_ctrl_ + half_old_capacity + 1, kHalfWidth);
-  // Clean up copied kSentinel from old_ctrl.
-  new_ctrl[half_old_capacity] = ctrl_t::kEmpty;
+  // new_ctrl = 456E0123?????????...
+  absl::little_endian::Store64(new_ctrl, copied_bytes);
 
-  // Clean up damaged or uninitialized bytes.
-
-  // Clean bytes after the intended size of the copy.
-  // Example:
-  // new_ctrl = 1E01EEEEEEE????
-  // *new_ctrl= 1E0EEEEEEEE????
-  // position      /
+  // Set the space [old_capacity + 1, new_capacity] to empty as these bytes will
+  // not be written again. This is safe because
+  // NumControlBytes = new_capacity + kWidth and new_capacity >=
+  // old_capacity+1.
+  // Examples:
+  // (old_capacity = 3, new_capacity = 15)
+  // new_ctrl  = 12E012EE?????????????...??
+  // *new_ctrl = 12E0EEEEEEEEEEEEEEEE?...??
+  // position        /          S
+  //
+  // (old_capacity = 7, new_capacity = 15)
+  // new_ctrl  = 456E0123?????????????????...??
+  // *new_ctrl = 456E0123EEEEEEEEEEEEEEEE?...??
+  // position            /      S
   std::memset(new_ctrl + old_capacity_ + 1, static_cast<int8_t>(ctrl_t::kEmpty),
-              kHalfWidth);
-  // Clean non-mirrored bytes that are not initialized.
-  // For small old_capacity that may be inside of mirrored bytes zone.
+              Group::kWidth);
+
+  // Set the last kHalfWidth bytes to empty, to ensure the bytes all the way to
+  // the end are initialized.
   // Examples:
-  // new_ctrl = 1E0EEEEEEEE??????????....
-  // *new_ctrl= 1E0EEEEEEEEEEEEE?????....
-  // position           /
+  // new_ctrl  = 12E0EEEEEEEEEEEEEEEE?...???????
+  // *new_ctrl = 12E0EEEEEEEEEEEEEEEE???EEEEEEEE
+  // position                   S       /
   //
-  // new_ctrl = 456E0123???????????...
-  // *new_ctrl= 456E0123EEEEEEEE???...
-  // position           /
-  std::memset(new_ctrl + kHalfWidth, static_cast<int8_t>(ctrl_t::kEmpty),
-              kHalfWidth);
-  // Clean last mirrored bytes that are not initialized
-  // and will not be overwritten by mirroring.
-  // Examples:
-  // new_ctrl = 1E0EEEEEEEEEEEEE????????
-  // *new_ctrl= 1E0EEEEEEEEEEEEEEEEEEEEE
-  // position           S       /
-  //
-  // new_ctrl = 456E0123EEEEEEEE???????????????
-  // *new_ctrl= 456E0123EEEEEEEE???????EEEEEEEE
-  // position                  S       /
-  std::memset(new_ctrl + new_capacity + kHalfWidth,
+  // new_ctrl  = 456E0123EEEEEEEEEEEEEEEE???????
+  // *new_ctrl = 456E0123EEEEEEEEEEEEEEEEEEEEEEE
+  // position                   S       /
+  std::memset(new_ctrl + NumControlBytes(new_capacity) - kHalfWidth,
               static_cast<int8_t>(ctrl_t::kEmpty), kHalfWidth);
 
-  // Create mirrored bytes. old_capacity_ < kHalfWidth
-  // Example:
-  // new_ctrl = 456E0123EEEEEEEE???????EEEEEEEE
-  // *new_ctrl= 456E0123EEEEEEEE456E0123EEEEEEE
-  // position                  S/
-  ctrl_t g[kHalfWidth];
-  std::memcpy(g, new_ctrl, kHalfWidth);
-  std::memcpy(new_ctrl + new_capacity + 1, g, kHalfWidth);
+  // Copy the first bytes to the end (starting at new_capacity +1) to set the
+  // cloned bytes. Note that we use the already copied bytes from old_ctrl here
+  // rather than copying from new_ctrl to avoid a Read-after-Write hazard, since
+  // new_ctrl was just written to. The first old_capacity-1 bytes are set
+  // correctly. Then there may be up to old_capacity bytes that need to be
+  // overwritten, and any remaining bytes will be correctly set to empty. This
+  // sets [new_capacity + 1, new_capacity +1 + old_capacity] correctly.
+  // Examples:
+  // new_ctrl  = 12E0EEEEEEEEEEEEEEEE?...???????
+  // *new_ctrl = 12E0EEEEEEEEEEEE12E012EEEEEEEEE
+  // position                   S/
+  //
+  // new_ctrl  = 456E0123EEEEEEEE?...???EEEEEEEE
+  // *new_ctrl = 456E0123EEEEEEEE456E0123EEEEEEE
+  // position                   S/
+  absl::little_endian::Store64(new_ctrl + new_capacity + 1, copied_bytes);
 
-  // Finally set sentinel to its place.
+  // Set the remaining bytes at the end past the cloned bytes to empty. The
+  // incorrectly set bytes are [new_capacity + old_capacity + 2,
+  // min(new_capacity + 1 + kHalfWidth, new_capacity + old_capacity + 2 +
+  // half_old_capacity)]. Taking the difference, we need to set
+  // min(kHalfWidth - (old_capacity + 1), half_old_capacity) bytes. Since
+  // old_capacity < kHalfWidth, half_old_capacity < kQuarterWidth, so we set
+  // kQuarterWidth bytes beginning at new_capacity + old_capacity + 2 to kEmpty.
+  // Examples:
+  // new_ctrl  = 12E0EEEEEEEEEEEE12E012EEEEEEEEE
+  // *new_ctrl = 12E0EEEEEEEEEEEE12E0EEEEEEEEEEE
+  // position                   S    /
+  //
+  // new_ctrl  = 456E0123EEEEEEEE456E0123EEEEEEE
+  // *new_ctrl = 456E0123EEEEEEEE456E0123EEEEEEE (no change)
+  // position                   S        /
+  std::memset(new_ctrl + new_capacity + old_capacity_ + 2,
+              static_cast<int8_t>(ctrl_t::kEmpty), kQuarterWidth);
+
+  // Finally, we set the new sentinel byte.
+  new_ctrl[new_capacity] = ctrl_t::kSentinel;
+}
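The kEmptyXorSentinel step above relies on XOR toggling between the two control byte values: XOR-ing a byte known to hold kSentinel with (kEmpty ^ kSentinel) yields kEmpty, so the conversion is folded into the already-loaded 64-bit word instead of costing a separate store. A standalone sketch, assuming the usual ctrl_t encodings kEmpty == -128 and kSentinel == -1 (they are defined elsewhere in raw_hash_set.h):

#include <cassert>
#include <cstdint>

int main() {
  // Assumed encodings; the real values live in absl::container_internal.
  const uint8_t kEmpty = 0x80;     // ctrl_t::kEmpty    == -128
  const uint8_t kSentinel = 0xFF;  // ctrl_t::kSentinel == -1

  const uint8_t kEmptyXorSentinel = kEmpty ^ kSentinel;  // 0x7f
  // A byte known to be kSentinel turns into kEmpty with a single XOR, which
  // can be applied to the whole 64-bit word via a shifted mask.
  assert((kSentinel ^ kEmptyXorSentinel) == kEmpty);
  return 0;
}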
+
+void HashSetResizeHelper::InitControlBytesAfterSoo(ctrl_t* new_ctrl, ctrl_t h2,
+                                                   size_t new_capacity) {
+  assert(is_single_group(new_capacity));
+  std::memset(new_ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
+              NumControlBytes(new_capacity));
+  assert(HashSetResizeHelper::SooSlotIndex() == 1);
+  // This allows us to avoid branching on had_soo_slot_.
+  assert(had_soo_slot_ || h2 == ctrl_t::kEmpty);
+  new_ctrl[1] = new_ctrl[new_capacity + 2] = h2;
   new_ctrl[new_capacity] = ctrl_t::kSentinel;
 }
 
 void HashSetResizeHelper::GrowIntoSingleGroupShuffleTransferableSlots(
-    void* old_slots, void* new_slots, size_t slot_size) const {
+    void* new_slots, size_t slot_size) const {
   assert(old_capacity_ > 0);
   const size_t half_old_capacity = old_capacity_ / 2;
 
-  SanitizerUnpoisonMemoryRegion(old_slots, slot_size * old_capacity_);
+  SanitizerUnpoisonMemoryRegion(old_slots(), slot_size * old_capacity_);
   std::memcpy(new_slots,
-              SlotAddress(old_slots, half_old_capacity + 1, slot_size),
+              SlotAddress(old_slots(), half_old_capacity + 1, slot_size),
               slot_size * half_old_capacity);
   std::memcpy(SlotAddress(new_slots, half_old_capacity + 1, slot_size),
-              old_slots, slot_size * (half_old_capacity + 1));
+              old_slots(), slot_size * (half_old_capacity + 1));
 }
 
 void HashSetResizeHelper::GrowSizeIntoSingleGroupTransferable(
-    CommonFields& c, void* old_slots, size_t slot_size) {
+    CommonFields& c, size_t slot_size) {
   assert(old_capacity_ < Group::kWidth / 2);
   assert(is_single_group(c.capacity()));
   assert(IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity()));
 
   GrowIntoSingleGroupShuffleControlBytes(c.control(), c.capacity());
-  GrowIntoSingleGroupShuffleTransferableSlots(old_slots, c.slot_array(),
-                                              slot_size);
+  GrowIntoSingleGroupShuffleTransferableSlots(c.slot_array(), slot_size);
 
   // We poison since GrowIntoSingleGroupShuffleTransferableSlots
   // may leave empty slots unpoisoned.
   PoisonSingleGroupEmptySlots(c, slot_size);
 }
 
+void HashSetResizeHelper::TransferSlotAfterSoo(CommonFields& c,
+                                               size_t slot_size) {
+  assert(was_soo_);
+  assert(had_soo_slot_);
+  assert(is_single_group(c.capacity()));
+  std::memcpy(SlotAddress(c.slot_array(), SooSlotIndex(), slot_size),
+              old_soo_data(), slot_size);
+  PoisonSingleGroupEmptySlots(c, slot_size);
+}
+
+namespace {
+
+// Called whenever the table needs to vacate empty slots either by removing
+// tombstones via rehash or growth.
+ABSL_ATTRIBUTE_NOINLINE
+FindInfo FindInsertPositionWithGrowthOrRehash(CommonFields& common, size_t hash,
+                                              const PolicyFunctions& policy) {
+  const size_t cap = common.capacity();
+  if (cap > Group::kWidth &&
+      // Do these calculations in 64-bit to avoid overflow.
+      common.size() * uint64_t{32} <= cap * uint64_t{25}) {
+    // Squash DELETED without growing if there is enough capacity.
+    //
+    // Rehash in place if the current size is <= 25/32 of capacity.
+    // Rationale for such a high factor: 1) DropDeletesWithoutResize() is
+    // faster than resize, and 2) it takes quite a bit of work to add
+    // tombstones.  In the worst case, seems to take approximately 4
+    // insert/erase pairs to create a single tombstone and so if we are
+    // rehashing because of tombstones, we can afford to rehash-in-place as
+    // long as we are reclaiming at least 1/8 the capacity without doing more
+    // than 2X the work.  (Where "work" is defined to be size() for rehashing
+    // or rehashing in place, and 1 for an insert or erase.)  But rehashing in
+    // place is faster per operation than inserting or even doubling the size
+    // of the table, so we actually afford to reclaim even less space from a
+    // resize-in-place.  The decision is to rehash in place if we can reclaim
+    // at about 1/8th of the usable capacity (specifically 3/28 of the
+    // capacity) which means that the total cost of rehashing will be a small
+    // fraction of the total work.
+    //
+    // Here is output of an experiment using the BM_CacheInSteadyState
+    // benchmark running the old case (where we rehash-in-place only if we can
+    // reclaim at least 7/16*capacity) vs. this code (which rehashes in place
+    // if we can recover 3/32*capacity).
+    //
+    // Note that although in the worst case the number of rehashes jumped up
+    // from 15 to 190, the number of operations per second is almost the same.
+    //
+    // Abridged output of running BM_CacheInSteadyState benchmark from
+    // raw_hash_set_benchmark.   N is the number of insert/erase operations.
+    //
+    //      | OLD (recover >= 7/16        | NEW (recover >= 3/32)
+    // size |    N/s LoadFactor NRehashes |    N/s LoadFactor NRehashes
+    //  448 | 145284       0.44        18 | 140118       0.44        19
+    //  493 | 152546       0.24        11 | 151417       0.48        28
+    //  538 | 151439       0.26        11 | 151152       0.53        38
+    //  583 | 151765       0.28        11 | 150572       0.57        50
+    //  628 | 150241       0.31        11 | 150853       0.61        66
+    //  672 | 149602       0.33        12 | 150110       0.66        90
+    //  717 | 149998       0.35        12 | 149531       0.70       129
+    //  762 | 149836       0.37        13 | 148559       0.74       190
+    //  807 | 149736       0.39        14 | 151107       0.39        14
+    //  852 | 150204       0.42        15 | 151019       0.42        15
+    DropDeletesWithoutResize(common, policy);
+  } else {
+    // Otherwise grow the container.
+    policy.resize(common, NextCapacity(cap), HashtablezInfoHandle{});
+  }
+  // This function is typically called with tables containing deleted slots.
+  // The table will be big and `FindFirstNonFullAfterResize` will always
+  // fall back to `find_first_non_full`, so we use `find_first_non_full` directly.
+  return find_first_non_full(common, hash);
+}
+
+}  // namespace
+
+const void* GetHashRefForEmptyHasher(const CommonFields& common) {
+  // Empty base optimization typically makes the empty base class address
+  // the same as the first address of the derived class object.
+  // But we generally assume that for an empty hasher we can return any valid
+  // pointer.
+  return &common;
+}
+
+size_t PrepareInsertNonSoo(CommonFields& common, size_t hash, FindInfo target,
+                           const PolicyFunctions& policy) {
+  // When there are no deleted slots in the table
+  // and growth_left is positive, we can insert at the first
+  // empty slot in the probe sequence (target).
+  const bool use_target_hint =
+      // Optimization is disabled when generations are enabled.
+      // We have to rehash even sparse tables randomly in such mode.
+      !SwisstableGenerationsEnabled() &&
+      common.growth_info().HasNoDeletedAndGrowthLeft();
+  if (ABSL_PREDICT_FALSE(!use_target_hint)) {
+    // Notes about optimized mode when generations are disabled:
+    // We do not enter this branch if the table has no deleted slots
+    // and growth_left is positive.
+    // We enter this branch in the following cases listed in decreasing
+    // frequency:
+    // 1. Table without deleted slots (>95% cases) that needs to be resized.
+    // 2. Table with deleted slots that has space for the inserting element.
+    // 3. Table with deleted slots that needs to be rehashed or resized.
+    if (ABSL_PREDICT_TRUE(common.growth_info().HasNoGrowthLeftAndNoDeleted())) {
+      const size_t old_capacity = common.capacity();
+      policy.resize(common, NextCapacity(old_capacity), HashtablezInfoHandle{});
+      target = HashSetResizeHelper::FindFirstNonFullAfterResize(
+          common, old_capacity, hash);
+    } else {
+      // Note: the table may have no deleted slots here when generations
+      // are enabled.
+      const bool rehash_for_bug_detection =
+          common.should_rehash_for_bug_detection_on_insert();
+      if (rehash_for_bug_detection) {
+        // Move to a different heap allocation in order to detect bugs.
+        const size_t cap = common.capacity();
+        policy.resize(common,
+                      common.growth_left() > 0 ? cap : NextCapacity(cap),
+                      HashtablezInfoHandle{});
+      }
+      if (ABSL_PREDICT_TRUE(common.growth_left() > 0)) {
+        target = find_first_non_full(common, hash);
+      } else {
+        target = FindInsertPositionWithGrowthOrRehash(common, hash, policy);
+      }
+    }
+  }
+  PrepareInsertCommon(common);
+  common.growth_info().OverwriteControlAsFull(common.control()[target.offset]);
+  SetCtrl(common, target.offset, H2(hash), policy.slot_size);
+  common.infoz().RecordInsert(hash, target.probe_length);
+  return target.offset;
+}
+
 }  // namespace container_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
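The rehash-in-place heuristic in FindInsertPositionWithGrowthOrRehash above compares `size * 32 <= capacity * 25` in 64-bit arithmetic, which is the overflow-safe form of "size is at most 25/32 of capacity". A small standalone sketch of that check with a worked value:

#include <cassert>
#include <cstdint>

// Mirrors the load-factor part of the condition (the real code additionally
// requires capacity > Group::kWidth): true when the table is at most
// 25/32 (~78%) full, so squashing tombstones is cheaper than growing.
static bool ShouldRehashInPlace(uint64_t size, uint64_t capacity) {
  return size * 32 <= capacity * 25;
}

int main() {
  // For capacity 127, the threshold is 25/32 * 127 ~= 99.2 elements.
  assert(ShouldRehashInPlace(99, 127));
  assert(!ShouldRehashInPlace(100, 127));
  return 0;
}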
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index 3518bc3..d4fe8f5 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -80,7 +80,7 @@
 //     slot_type slots[capacity];
 //   };
 //
-// The length of this array is computed by `AllocSize()` below.
+// The length of this array is computed by `RawHashSetLayout::alloc_size` below.
 //
 // Control bytes (`ctrl_t`) are bytes (collected into groups of a
 // platform-specific size) that define the state of the corresponding slot in
@@ -100,6 +100,13 @@
 // Storing control bytes in a separate array also has beneficial cache effects,
 // since more logical slots will fit into a cache line.
 //
+// # Small Object Optimization (SOO)
+//
+// When the size/alignment of the value_type and the capacity of the table are
+// small, we enable small object optimization and store the values inline in
+// the raw_hash_set object. This optimization allows us to avoid
+// allocation/deallocation as well as cache/dTLB misses.
+//
 // # Hashing
 //
 // We compute two separate hashes, `H1` and `H2`, from the hash of an object.
@@ -233,9 +240,10 @@
 
 #ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
 #error ABSL_SWISSTABLE_ENABLE_GENERATIONS cannot be directly set
-#elif defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
-    defined(ABSL_HAVE_HWADDRESS_SANITIZER) || \
-    defined(ABSL_HAVE_MEMORY_SANITIZER)
+#elif (defined(ABSL_HAVE_ADDRESS_SANITIZER) ||   \
+       defined(ABSL_HAVE_HWADDRESS_SANITIZER) || \
+       defined(ABSL_HAVE_MEMORY_SANITIZER)) &&   \
+    !defined(NDEBUG_SANITIZER)  // If defined, performance is important.
 // When compiled in sanitizer mode, we add generation integers to the backing
 // array and iterators. In the backing array, we store the generation between
 // the control bytes and the slots. When iterators are dereferenced, we assert
@@ -374,6 +382,9 @@
   return static_cast<uint32_t>(countr_zero(x));
 }
 
+// 8-byte bitmask with the most significant bit set in every byte.
+constexpr uint64_t kMsbs8Bytes = 0x8080808080808080ULL;
+
 // An abstract bitmask, such as that emitted by a SIMD instruction.
 //
 // Specifically, this type implements a simple bitset whose representation is
@@ -423,27 +434,35 @@
 // an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When
 // `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as
 // the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask.
+// If NullifyBitsOnIteration is true (only allowed for Shift == 3),
+// a non-zero abstract bit is allowed to have additional bits set
+// (e.g., `0xff`, `0x83` and `0x9c` are ok, but `0x6f` is not).
 //
 // For example:
 //   for (int i : BitMask<uint32_t, 16>(0b101)) -> yields 0, 2
 //   for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
-template <class T, int SignificantBits, int Shift = 0>
+template <class T, int SignificantBits, int Shift = 0,
+          bool NullifyBitsOnIteration = false>
 class BitMask : public NonIterableBitMask<T, SignificantBits, Shift> {
   using Base = NonIterableBitMask<T, SignificantBits, Shift>;
   static_assert(std::is_unsigned<T>::value, "");
   static_assert(Shift == 0 || Shift == 3, "");
+  static_assert(!NullifyBitsOnIteration || Shift == 3, "");
 
  public:
-  explicit BitMask(T mask) : Base(mask) {}
+  explicit BitMask(T mask) : Base(mask) {
+    if (Shift == 3 && !NullifyBitsOnIteration) {
+      assert(this->mask_ == (this->mask_ & kMsbs8Bytes));
+    }
+  }
   // BitMask is an iterator over the indices of its abstract bits.
   using value_type = int;
   using iterator = BitMask;
   using const_iterator = BitMask;
 
   BitMask& operator++() {
-    if (Shift == 3) {
-      constexpr uint64_t msbs = 0x8080808080808080ULL;
-      this->mask_ &= msbs;
+    if (Shift == 3 && NullifyBitsOnIteration) {
+      this->mask_ &= kMsbs8Bytes;
     }
     this->mask_ &= (this->mask_ - 1);
     return *this;
@@ -520,10 +539,24 @@
 // Returns a pointer to a control byte group that can be used by empty tables.
 inline ctrl_t* EmptyGroup() {
   // Const must be cast away here; no uses of this function will actually write
-  // to it, because it is only used for empty tables.
+  // to it because it is only used for empty tables.
   return const_cast<ctrl_t*>(kEmptyGroup + 16);
 }
 
+// For use in SOO iterators.
+// TODO(b/289225379): we could potentially get rid of this by adding an is_soo
+// bit in iterators. This would add branches but reduce cache misses.
+ABSL_DLL extern const ctrl_t kSooControl[17];
+
+// Returns a pointer to a full byte followed by a sentinel byte.
+inline ctrl_t* SooControl() {
+  // Const must be cast away here; no uses of this function will actually write
+  // to it because it is only used for SOO iterators.
+  return const_cast<ctrl_t*>(kSooControl);
+}
+// Whether ctrl is from the SooControl array.
+inline bool IsSooControl(const ctrl_t* ctrl) { return ctrl == SooControl(); }
+
 // Returns a pointer to a generation to use for an empty hashtable.
 GenerationType* EmptyGeneration();
 
@@ -535,7 +568,37 @@
 
 // Mixes a randomly generated per-process seed with `hash` and `ctrl` to
 // randomize insertion order within groups.
-bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl);
+bool ShouldInsertBackwardsForDebug(size_t capacity, size_t hash,
+                                   const ctrl_t* ctrl);
+
+ABSL_ATTRIBUTE_ALWAYS_INLINE inline bool ShouldInsertBackwards(
+    ABSL_ATTRIBUTE_UNUSED size_t capacity, ABSL_ATTRIBUTE_UNUSED size_t hash,
+    ABSL_ATTRIBUTE_UNUSED const ctrl_t* ctrl) {
+#if defined(NDEBUG)
+  return false;
+#else
+  return ShouldInsertBackwardsForDebug(capacity, hash, ctrl);
+#endif
+}
+
+// Returns the insert position for the given mask.
+// We want to add entropy even when ASLR is not enabled.
+// In debug builds we will randomly insert in either the front or back of
+// the group.
+// TODO(kfm,sbenza): revisit after we do unconditional mixing
+template <class Mask>
+ABSL_ATTRIBUTE_ALWAYS_INLINE inline auto GetInsertionOffset(
+    Mask mask, ABSL_ATTRIBUTE_UNUSED size_t capacity,
+    ABSL_ATTRIBUTE_UNUSED size_t hash,
+    ABSL_ATTRIBUTE_UNUSED const ctrl_t* ctrl) {
+#if defined(NDEBUG)
+  return mask.LowestBitSet();
+#else
+  return ShouldInsertBackwardsForDebug(capacity, hash, ctrl)
+             ? mask.HighestBitSet()
+             : mask.LowestBitSet();
+#endif
+}
 
 // Returns a per-table, hash salt, which changes on resize. This gets mixed into
 // H1 to randomize iteration order per-table.
@@ -560,7 +623,12 @@
 
 // Helpers for checking the state of a control byte.
 inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; }
-inline bool IsFull(ctrl_t c) { return c >= static_cast<ctrl_t>(0); }
+inline bool IsFull(ctrl_t c) {
+  // Cast `c` to the underlying type instead of casting `0` to `ctrl_t` as `0`
+  // is not a value in the enum. Both ways are equivalent, but this way makes
+  // linters happier.
+  return static_cast<std::underlying_type_t<ctrl_t>>(c) >= 0;
+}
 inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; }
 inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; }
 
@@ -646,6 +714,14 @@
         static_cast<uint16_t>(_mm_movemask_epi8(ctrl) ^ 0xffff));
   }
 
+  // Returns a bitmask representing the positions of non-full slots.
+  // Note: this includes kEmpty, kDeleted, and kSentinel.
+  // It is useful in contexts where kSentinel is not present.
+  auto MaskNonFull() const {
+    return BitMask<uint16_t, kWidth>(
+        static_cast<uint16_t>(_mm_movemask_epi8(ctrl)));
+  }
+
   // Returns a bitmask representing the positions of empty or deleted slots.
   NonIterableBitMask<uint16_t, kWidth> MaskEmptyOrDeleted() const {
     auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
@@ -685,10 +761,11 @@
     ctrl = vld1_u8(reinterpret_cast<const uint8_t*>(pos));
   }
 
-  BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
+  auto Match(h2_t hash) const {
     uint8x8_t dup = vdup_n_u8(hash);
     auto mask = vceq_u8(ctrl, dup);
-    return BitMask<uint64_t, kWidth, 3>(
+    return BitMask<uint64_t, kWidth, /*Shift=*/3,
+                   /*NullifyBitsOnIteration=*/true>(
         vget_lane_u64(vreinterpret_u64_u8(mask), 0));
   }
 
@@ -704,12 +781,25 @@
   // Returns a bitmask representing the positions of full slots.
   // Note: for `is_small()` tables group may contain the "same" slot twice:
   // original and mirrored.
-  BitMask<uint64_t, kWidth, 3> MaskFull() const {
+  auto MaskFull() const {
     uint64_t mask = vget_lane_u64(
         vreinterpret_u64_u8(vcge_s8(vreinterpret_s8_u8(ctrl),
                                     vdup_n_s8(static_cast<int8_t>(0)))),
         0);
-    return BitMask<uint64_t, kWidth, 3>(mask);
+    return BitMask<uint64_t, kWidth, /*Shift=*/3,
+                   /*NullifyBitsOnIteration=*/true>(mask);
+  }
+
+  // Returns a bitmask representing the positions of non-full slots.
+  // Note: this includes kEmpty, kDeleted, and kSentinel.
+  // It is useful in contexts where kSentinel is not present.
+  auto MaskNonFull() const {
+    uint64_t mask = vget_lane_u64(
+        vreinterpret_u64_u8(vclt_s8(vreinterpret_s8_u8(ctrl),
+                                    vdup_n_s8(static_cast<int8_t>(0)))),
+        0);
+    return BitMask<uint64_t, kWidth, /*Shift=*/3,
+                   /*NullifyBitsOnIteration=*/true>(mask);
   }
 
   NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
@@ -736,11 +826,10 @@
 
   void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
     uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0);
-    constexpr uint64_t msbs = 0x8080808080808080ULL;
     constexpr uint64_t slsbs = 0x0202020202020202ULL;
     constexpr uint64_t midbs = 0x7e7e7e7e7e7e7e7eULL;
     auto x = slsbs & (mask >> 6);
-    auto res = (x + midbs) | msbs;
+    auto res = (x + midbs) | kMsbs8Bytes;
     little_endian::Store64(dst, res);
   }
 
@@ -768,30 +857,33 @@
     //   v = 0x1716151413121110
     //   hash = 0x12
     //   retval = (v - lsbs) & ~v & msbs = 0x0000000080800000
-    constexpr uint64_t msbs = 0x8080808080808080ULL;
     constexpr uint64_t lsbs = 0x0101010101010101ULL;
     auto x = ctrl ^ (lsbs * hash);
-    return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & msbs);
+    return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & kMsbs8Bytes);
   }
 
   NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
-    constexpr uint64_t msbs = 0x8080808080808080ULL;
     return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & ~(ctrl << 6)) &
-                                                   msbs);
+                                                   kMsbs8Bytes);
   }
 
   // Returns a bitmask representing the positions of full slots.
   // Note: for `is_small()` tables group may contain the "same" slot twice:
   // original and mirrored.
   BitMask<uint64_t, kWidth, 3> MaskFull() const {
-    constexpr uint64_t msbs = 0x8080808080808080ULL;
-    return BitMask<uint64_t, kWidth, 3>((ctrl ^ msbs) & msbs);
+    return BitMask<uint64_t, kWidth, 3>((ctrl ^ kMsbs8Bytes) & kMsbs8Bytes);
+  }
+
+  // Returns a bitmask representing the positions of non-full slots.
+  // Note: this includes kEmpty, kDeleted, and kSentinel.
+  // It is useful in contexts where kSentinel is not present.
+  auto MaskNonFull() const {
+    return BitMask<uint64_t, kWidth, 3>(ctrl & kMsbs8Bytes);
   }
 
   NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
-    constexpr uint64_t msbs = 0x8080808080808080ULL;
     return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & ~(ctrl << 7)) &
-                                                   msbs);
+                                                   kMsbs8Bytes);
   }
 
   uint32_t CountLeadingEmptyOrDeleted() const {
@@ -803,9 +895,8 @@
   }
 
   void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
-    constexpr uint64_t msbs = 0x8080808080808080ULL;
     constexpr uint64_t lsbs = 0x0101010101010101ULL;
-    auto x = ctrl & msbs;
+    auto x = ctrl & kMsbs8Bytes;
     auto res = (~x + (x >> 7)) & ~lsbs;
     little_endian::Store64(dst, res);
   }
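The portable `Match()` and the kMsbs8Bytes rewrites in this hunk use the classic "word has a byte equal to n" bit trick: `(x - lsbs) & ~x & msbs` sets the high bit of every byte of `x` that is zero. A standalone sketch reproducing the worked example from the Match() comment (`v = 0x1716151413121110`, `hash = 0x12`, expected `0x0000000080800000`; byte 3 is one of the documented false positives):

#include <cassert>
#include <cstdint>

// Sets the high bit of each byte of `ctrl` that equals `hash` (plus the
// documented false positives adjacent to a real match).
static uint64_t MatchBytes(uint64_t ctrl, uint8_t hash) {
  constexpr uint64_t kLsbs = 0x0101010101010101ULL;
  constexpr uint64_t kMsbs = 0x8080808080808080ULL;
  const uint64_t x = ctrl ^ (kLsbs * hash);  // byte is 0 where ctrl byte == hash
  return (x - kLsbs) & ~x & kMsbs;
}

int main() {
  // Worked example from the Match() comment in raw_hash_set.h.
  assert(MatchBytes(0x1716151413121110ULL, 0x12) == 0x0000000080800000ULL);
  return 0;
}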
@@ -815,21 +906,21 @@
 
 #ifdef ABSL_INTERNAL_HAVE_SSE2
 using Group = GroupSse2Impl;
-using GroupEmptyOrDeleted = GroupSse2Impl;
+using GroupFullEmptyOrDeleted = GroupSse2Impl;
 #elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
 using Group = GroupAArch64Impl;
 // For Aarch64, we use the portable implementation for counting and masking
-// empty or deleted group elements. This is to avoid the latency of moving
+// full, empty or deleted group elements. This is to avoid the latency of moving
 // between data GPRs and Neon registers when it does not provide a benefit.
 // Using Neon is profitable when we call Match(), but is not when we don't,
-// which is the case when we do *EmptyOrDeleted operations. It is difficult to
-// make a similar approach beneficial on other architectures such as x86 since
-// they have much lower GPR <-> vector register transfer latency and 16-wide
-// Groups.
-using GroupEmptyOrDeleted = GroupPortableImpl;
+// which is the case when we do *EmptyOrDeleted and MaskFull operations.
+// It is difficult to make a similar approach beneficial on other architectures
+// such as x86 since they have much lower GPR <-> vector register transfer
+// latency and 16-wide Groups.
+using GroupFullEmptyOrDeleted = GroupPortableImpl;
 #else
 using Group = GroupPortableImpl;
-using GroupEmptyOrDeleted = GroupPortableImpl;
+using GroupFullEmptyOrDeleted = GroupPortableImpl;
 #endif
 
 // When there is an insertion with no reserved growth, we rehash with
@@ -978,17 +1069,96 @@
 using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoDisabled;
 #endif
 
+// Stores the information regarding the number of slots we can still fill
+// without needing to rehash.
+//
+// We want to ensure a sufficient number of empty slots in the table in order
+// to keep probe sequences relatively short. An empty slot in the probe group
+// is required to stop probing.
+//
+// Tombstones (kDeleted slots) are not included in the growth capacity,
+// because we'd like to rehash when the table is filled with tombstones and/or
+// full slots.
+//
+// GrowthInfo also stores a bit that encodes whether the table may have any
+// deleted slots.
+// Most of the tables (>95%) have no deleted slots, so some functions can
+// be more efficient with this information.
+//
+// Callers can also force a rehash via the standard `rehash(0)`,
+// which will recompute this value as a side-effect.
+//
+// See also `CapacityToGrowth()`.
+class GrowthInfo {
+ public:
+  // Leaves data member uninitialized.
+  GrowthInfo() = default;
+
+  // Initializes the GrowthInfo assuming we can grow `growth_left` elements
+  // and there are no kDeleted slots in the table.
+  void InitGrowthLeftNoDeleted(size_t growth_left) {
+    growth_left_info_ = growth_left;
+  }
+
+  // Overwrites single full slot with an empty slot.
+  void OverwriteFullAsEmpty() { ++growth_left_info_; }
+
+  // Overwrites single empty slot with a full slot.
+  void OverwriteEmptyAsFull() {
+    assert(GetGrowthLeft() > 0);
+    --growth_left_info_;
+  }
+
+  // Overwrites several empty slots with full slots.
+  void OverwriteManyEmptyAsFull(size_t cnt) {
+    assert(GetGrowthLeft() >= cnt);
+    growth_left_info_ -= cnt;
+  }
+
+  // Overwrites specified control element with full slot.
+  void OverwriteControlAsFull(ctrl_t ctrl) {
+    assert(GetGrowthLeft() >= static_cast<size_t>(IsEmpty(ctrl)));
+    growth_left_info_ -= static_cast<size_t>(IsEmpty(ctrl));
+  }
+
+  // Overwrites single full slot with a deleted slot.
+  void OverwriteFullAsDeleted() { growth_left_info_ |= kDeletedBit; }
+
+  // Returns true if the table satisfies two properties:
+  // 1. Guaranteed to have no kDeleted slots.
+  // 2. There is a place for at least one element to grow.
+  bool HasNoDeletedAndGrowthLeft() const {
+    return static_cast<std::make_signed_t<size_t>>(growth_left_info_) > 0;
+  }
+
+  // Returns true if the table satisfies two properties:
+  // 1. Guaranteed to have no kDeleted slots.
+  // 2. There is no growth left.
+  bool HasNoGrowthLeftAndNoDeleted() const { return growth_left_info_ == 0; }
+
+  // Returns true if the table is guaranteed to have no kDeleted slots.
+  bool HasNoDeleted() const {
+    return static_cast<std::make_signed_t<size_t>>(growth_left_info_) >= 0;
+  }
+
+  // Returns the number of elements left to grow.
+  size_t GetGrowthLeft() const { return growth_left_info_ & kGrowthLeftMask; }
+
+ private:
+  static constexpr size_t kGrowthLeftMask = ((~size_t{}) >> 1);
+  static constexpr size_t kDeletedBit = ~kGrowthLeftMask;
+  // The topmost bit signals whether there are any deleted slots.
+  size_t growth_left_info_;
+};
+
+static_assert(sizeof(GrowthInfo) == sizeof(size_t), "");
+static_assert(alignof(GrowthInfo) == alignof(size_t), "");
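GrowthInfo packs two things into one size_t: the low bits hold growth_left and the topmost bit records whether the table may contain kDeleted slots, which is what lets checks such as HasNoDeletedAndGrowthLeft be a single signed comparison. A minimal standalone sketch of that encoding, mirroring the class above:

#include <cassert>
#include <cstddef>
#include <type_traits>

class GrowthInfoSketch {
 public:
  void InitGrowthLeftNoDeleted(size_t growth_left) { info_ = growth_left; }
  void OverwriteFullAsDeleted() { info_ |= kDeletedBit; }
  size_t GetGrowthLeft() const { return info_ & kGrowthLeftMask; }
  bool HasNoDeletedAndGrowthLeft() const {
    // Top bit clear (no deleted) and low bits non-zero (growth left) holds
    // exactly when the value, viewed as signed, is strictly positive.
    return static_cast<std::make_signed_t<size_t>>(info_) > 0;
  }

 private:
  static constexpr size_t kGrowthLeftMask = (~size_t{}) >> 1;
  static constexpr size_t kDeletedBit = ~kGrowthLeftMask;
  size_t info_ = 0;
};

int main() {
  GrowthInfoSketch g;
  g.InitGrowthLeftNoDeleted(5);
  assert(g.HasNoDeletedAndGrowthLeft() && g.GetGrowthLeft() == 5);
  g.OverwriteFullAsDeleted();  // sets the top bit; growth_left is untouched
  assert(!g.HasNoDeletedAndGrowthLeft() && g.GetGrowthLeft() == 5);
  return 0;
}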
+
 // Returns whether `n` is a valid capacity (i.e., number of slots).
 //
 // A valid capacity is a non-zero integer `2^m - 1`.
 inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
 
-// Computes the offset from the start of the backing allocation of control.
-// infoz and growth_left are stored at the beginning of the backing array.
-inline size_t ControlOffset(bool has_infoz) {
-  return (has_infoz ? sizeof(HashtablezInfoHandle) : 0) + sizeof(size_t);
-}
-
 // Returns the number of "cloned control bytes".
 //
 // This is the number of control bytes that are present both at the beginning
@@ -996,36 +1166,157 @@
 // `Group::kWidth`-width probe window starting from any control byte.
 constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
 
-// Given the capacity of a table, computes the offset (from the start of the
-// backing allocation) of the generation counter (if it exists).
-inline size_t GenerationOffset(size_t capacity, bool has_infoz) {
-  assert(IsValidCapacity(capacity));
-  const size_t num_control_bytes = capacity + 1 + NumClonedBytes();
-  return ControlOffset(has_infoz) + num_control_bytes;
+// Returns the number of control bytes including cloned.
+constexpr size_t NumControlBytes(size_t capacity) {
+  return capacity + 1 + NumClonedBytes();
 }
 
-// Given the capacity of a table, computes the offset (from the start of the
-// backing allocation) at which the slots begin.
-inline size_t SlotOffset(size_t capacity, size_t slot_align, bool has_infoz) {
-  assert(IsValidCapacity(capacity));
-  return (GenerationOffset(capacity, has_infoz) + NumGenerationBytes() +
-          slot_align - 1) &
-         (~slot_align + 1);
+// Computes the offset from the start of the backing allocation of control.
+// infoz and growth_info are stored at the beginning of the backing array.
+inline static size_t ControlOffset(bool has_infoz) {
+  return (has_infoz ? sizeof(HashtablezInfoHandle) : 0) + sizeof(GrowthInfo);
 }
 
-// Given the capacity of a table, computes the total size of the backing
-// array.
-inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align,
-                        bool has_infoz) {
-  return SlotOffset(capacity, slot_align, has_infoz) + capacity * slot_size;
-}
+// Helper class for computing offsets and allocation size of hash set fields.
+class RawHashSetLayout {
+ public:
+  explicit RawHashSetLayout(size_t capacity, size_t slot_align, bool has_infoz)
+      : capacity_(capacity),
+        control_offset_(ControlOffset(has_infoz)),
+        generation_offset_(control_offset_ + NumControlBytes(capacity)),
+        slot_offset_(
+            (generation_offset_ + NumGenerationBytes() + slot_align - 1) &
+            (~slot_align + 1)) {
+    assert(IsValidCapacity(capacity));
+  }
+
+  // Returns the capacity of a table.
+  size_t capacity() const { return capacity_; }
+
+  // Returns precomputed offset from the start of the backing allocation of
+  // control.
+  size_t control_offset() const { return control_offset_; }
+
+  // Given the capacity of a table, computes the offset (from the start of the
+  // backing allocation) of the generation counter (if it exists).
+  size_t generation_offset() const { return generation_offset_; }
+
+  // Given the capacity of a table, computes the offset (from the start of the
+  // backing allocation) at which the slots begin.
+  size_t slot_offset() const { return slot_offset_; }
+
+  // Given the capacity of a table, computes the total size of the backing
+  // array.
+  size_t alloc_size(size_t slot_size) const {
+    return slot_offset_ + capacity_ * slot_size;
+  }
+
+ private:
+  size_t capacity_;
+  size_t control_offset_;
+  size_t generation_offset_;
+  size_t slot_offset_;
+};
+
+struct HashtableFreeFunctionsAccess;
+
+// We only allow a maximum of 1 SOO element, which makes the implementation
+// much simpler. Complications with multiple SOO elements include:
+// - Satisfying the guarantee that erasing one element doesn't invalidate
+//   iterators to other elements means we would probably need actual SOO
+//   control bytes.
+// - In order to prevent user code from depending on iteration order for small
+//   tables, we would need to randomize the iteration order somehow.
+constexpr size_t SooCapacity() { return 1; }
+// Sentinel type to indicate SOO CommonFields construction.
+struct soo_tag_t {};
+// Sentinel type to indicate SOO CommonFields construction with full size.
+struct full_soo_tag_t {};
+
+// Suppress erroneous uninitialized memory errors on GCC. For example, GCC
+// thinks that the call to slot_array() in find_or_prepare_insert() is reading
+// uninitialized memory, but slot_array is only called there when the table is
+// non-empty and this memory is initialized when the table is non-empty.
+#if !defined(__clang__) && defined(__GNUC__)
+#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(x)                    \
+  _Pragma("GCC diagnostic push")                                   \
+      _Pragma("GCC diagnostic ignored \"-Wmaybe-uninitialized\"")  \
+          _Pragma("GCC diagnostic ignored \"-Wuninitialized\"") x; \
+  _Pragma("GCC diagnostic pop")
+#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(x) \
+  ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(return x)
+#else
+#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(x) x
+#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(x) return x
+#endif
+
+// This allows us to work around an uninitialized memory warning when
+// constructing begin() iterators in empty hashtables.
+union MaybeInitializedPtr {
+  void* get() const { ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(p); }
+  void set(void* ptr) { p = ptr; }
+
+  void* p;
+};
+
+struct HeapPtrs {
+  HeapPtrs() = default;
+  explicit HeapPtrs(ctrl_t* c) : control(c) {}
+
+  // The control bytes (and, also, a pointer near to the base of the backing
+  // array).
+  //
+  // This contains `capacity + 1 + NumClonedBytes()` entries, even
+  // when the table is empty (hence EmptyGroup).
+  //
+  // Note that growth_info is stored immediately before this pointer.
+  // May be uninitialized for SOO tables.
+  ctrl_t* control;
+
+  // The beginning of the slots, located at `SlotOffset()` bytes after
+  // `control`. May be uninitialized for empty tables.
+  // Note: we can't use `slots` because Qt defines "slots" as a macro.
+  MaybeInitializedPtr slot_array;
+};
+
+// Manages the backing array pointers or the SOO slot. When raw_hash_set::is_soo
+// is true, the SOO slot is stored in `soo_data`. Otherwise, we use `heap`.
+union HeapOrSoo {
+  HeapOrSoo() = default;
+  explicit HeapOrSoo(ctrl_t* c) : heap(c) {}
+
+  ctrl_t*& control() {
+    ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.control);
+  }
+  ctrl_t* control() const {
+    ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.control);
+  }
+  MaybeInitializedPtr& slot_array() {
+    ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.slot_array);
+  }
+  MaybeInitializedPtr slot_array() const {
+    ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.slot_array);
+  }
+  void* get_soo_data() {
+    ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(soo_data);
+  }
+  const void* get_soo_data() const {
+    ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(soo_data);
+  }
+
+  HeapPtrs heap;
+  unsigned char soo_data[sizeof(HeapPtrs)];
+};
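Because `soo_data` is sized and aligned to `HeapPtrs` (two pointers), a slot type can be stored inline only if it fits that footprint. A hedged sketch of the fit check, using stand-in types rather than the real internal ones:

#include <utility>

// Stand-ins with the same shape as HeapPtrs/HeapOrSoo: two pointers' worth of
// suitably aligned storage.
struct HeapPointers {
  void* control;
  void* slots;
};

union SooStorage {
  HeapPointers heap;
  unsigned char soo_data[sizeof(HeapPointers)];
};

template <class Slot>
constexpr bool FitsInSoo() {
  return sizeof(Slot) <= sizeof(SooStorage) &&
         alignof(Slot) <= alignof(SooStorage);
}

struct ThreePointers {  // deliberately one pointer too large for the SOO area
  void* a;
  void* b;
  void* c;
};

static_assert(FitsInSoo<int>(), "a small slot fits inline");
static_assert(FitsInSoo<std::pair<void*, void*>>(),
              "two pointers exactly fill the SOO area");
static_assert(!FitsInSoo<ThreePointers>(),
              "three pointers no longer fit inline");

This mirrors the `sizeof(slot_type) <= sizeof(HeapOrSoo)` check that raw_hash_set::SooEnabled() performs further down in this patch.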
 
 // CommonFields hold the fields in raw_hash_set that do not depend
 // on template parameters. This allows us to conveniently pass all
 // of this state to helper functions as a single argument.
 class CommonFields : public CommonFieldsGenerationInfo {
  public:
-  CommonFields() = default;
+  CommonFields() : capacity_(0), size_(0), heap_or_soo_(EmptyGroup()) {}
+  explicit CommonFields(soo_tag_t) : capacity_(SooCapacity()), size_(0) {}
+  explicit CommonFields(full_soo_tag_t)
+      : capacity_(SooCapacity()), size_(size_t{1} << HasInfozShift()) {}
 
   // Not copyable
   CommonFields(const CommonFields&) = delete;
@@ -1035,23 +1326,44 @@
   CommonFields(CommonFields&& that) = default;
   CommonFields& operator=(CommonFields&&) = default;
 
-  ctrl_t* control() const { return control_; }
-  void set_control(ctrl_t* c) { control_ = c; }
+  template <bool kSooEnabled>
+  static CommonFields CreateDefault() {
+    return kSooEnabled ? CommonFields{soo_tag_t{}} : CommonFields{};
+  }
+
+  // The inline data for SOO is written on top of control_/slots_.
+  const void* soo_data() const { return heap_or_soo_.get_soo_data(); }
+  void* soo_data() { return heap_or_soo_.get_soo_data(); }
+
+  HeapOrSoo heap_or_soo() const { return heap_or_soo_; }
+  const HeapOrSoo& heap_or_soo_ref() const { return heap_or_soo_; }
+
+  ctrl_t* control() const { return heap_or_soo_.control(); }
+  void set_control(ctrl_t* c) { heap_or_soo_.control() = c; }
   void* backing_array_start() const {
-    // growth_left (and maybe infoz) is stored before control bytes.
+    // growth_info (and maybe infoz) is stored before control bytes.
     assert(reinterpret_cast<uintptr_t>(control()) % alignof(size_t) == 0);
     return control() - ControlOffset(has_infoz());
   }
 
   // Note: we can't use slots() because Qt defines "slots" as a macro.
-  void* slot_array() const { return slots_; }
-  void set_slots(void* s) { slots_ = s; }
+  void* slot_array() const { return heap_or_soo_.slot_array().get(); }
+  MaybeInitializedPtr slots_union() const { return heap_or_soo_.slot_array(); }
+  void set_slots(void* s) { heap_or_soo_.slot_array().set(s); }
 
   // The number of filled slots.
   size_t size() const { return size_ >> HasInfozShift(); }
   void set_size(size_t s) {
     size_ = (s << HasInfozShift()) | (size_ & HasInfozMask());
   }
+  void set_empty_soo() {
+    AssertInSooMode();
+    size_ = 0;
+  }
+  void set_full_soo() {
+    AssertInSooMode();
+    size_ = size_t{1} << HasInfozShift();
+  }
   void increment_size() {
     assert(size() < capacity());
     size_ += size_t{1} << HasInfozShift();
@@ -1070,15 +1382,17 @@
 
   // The number of slots we can still fill without needing to rehash.
   // This is stored in the heap allocation before the control bytes.
-  size_t growth_left() const {
-    const size_t* gl_ptr = reinterpret_cast<size_t*>(control()) - 1;
-    assert(reinterpret_cast<uintptr_t>(gl_ptr) % alignof(size_t) == 0);
+  // TODO(b/289225379): experiment with moving growth_info back inline to
+  // increase room for SOO.
+  size_t growth_left() const { return growth_info().GetGrowthLeft(); }
+
+  GrowthInfo& growth_info() {
+    auto* gl_ptr = reinterpret_cast<GrowthInfo*>(control()) - 1;
+    assert(reinterpret_cast<uintptr_t>(gl_ptr) % alignof(GrowthInfo) == 0);
     return *gl_ptr;
   }
-  void set_growth_left(size_t gl) {
-    size_t* gl_ptr = reinterpret_cast<size_t*>(control()) - 1;
-    assert(reinterpret_cast<uintptr_t>(gl_ptr) % alignof(size_t) == 0);
-    *gl_ptr = gl;
+  GrowthInfo growth_info() const {
+    return const_cast<CommonFields*>(this)->growth_info();
   }
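growth_info() above recovers a GrowthInfo that the allocation code places immediately before the control bytes. A hedged standalone sketch of that header-before-array layout, with hypothetical helper names and a plain size_t standing in for GrowthInfo:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <new>

// Allocates `n` payload bytes preceded by a size_t header and returns a
// pointer to the payload; header and payload share one allocation, just as
// growth_info and the control bytes share the backing array.
unsigned char* AllocateWithHeader(std::size_t n, std::size_t header_value) {
  void* mem = ::operator new(sizeof(std::size_t) + n);
  ::new (mem) std::size_t(header_value);  // header lives at the front
  return static_cast<unsigned char*>(mem) + sizeof(std::size_t);
}

// Recovers the header by stepping one size_t back from the payload pointer,
// the same arithmetic growth_info() applies to control().
std::size_t& HeaderOf(unsigned char* payload) {
  auto* header = reinterpret_cast<std::size_t*>(payload) - 1;
  assert(reinterpret_cast<std::uintptr_t>(header) % alignof(std::size_t) == 0);
  return *header;
}

void DeallocateWithHeader(unsigned char* payload) {
  ::operator delete(payload - sizeof(std::size_t));
}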
 
   bool has_infoz() const {
@@ -1103,12 +1417,8 @@
         should_rehash_for_bug_detection_on_insert(control(), capacity());
   }
   bool should_rehash_for_bug_detection_on_move() const {
-    return CommonFieldsGenerationInfo::
-        should_rehash_for_bug_detection_on_move(control(), capacity());
-  }
-  void maybe_increment_generation_on_move() {
-    if (capacity() == 0) return;
-    increment_generation();
+    return CommonFieldsGenerationInfo::should_rehash_for_bug_detection_on_move(
+        control(), capacity());
   }
   void reset_reserved_growth(size_t reservation) {
     CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size());
@@ -1116,7 +1426,16 @@
 
   // The size of the backing array allocation.
   size_t alloc_size(size_t slot_size, size_t slot_align) const {
-    return AllocSize(capacity(), slot_size, slot_align, has_infoz());
+    return RawHashSetLayout(capacity(), slot_align, has_infoz())
+        .alloc_size(slot_size);
+  }
+
+  // Move fields other than heap_or_soo_.
+  void move_non_heap_or_soo_fields(CommonFields& that) {
+    static_cast<CommonFieldsGenerationInfo&>(*this) =
+        std::move(static_cast<CommonFieldsGenerationInfo&>(that));
+    capacity_ = that.capacity_;
+    size_ = that.size_;
   }
 
   // Returns the number of control bytes set to kDeleted. For testing only.
@@ -1132,21 +1451,12 @@
     return (size_t{1} << HasInfozShift()) - 1;
   }
 
-  // TODO(b/182800944): Investigate removing some of these fields:
-  // - control/slots can be derived from each other
-
-  // The control bytes (and, also, a pointer near to the base of the backing
-  // array).
-  //
-  // This contains `capacity + 1 + NumClonedBytes()` entries, even
-  // when the table is empty (hence EmptyGroup).
-  //
-  // Note that growth_left is stored immediately before this pointer.
-  ctrl_t* control_ = EmptyGroup();
-
-  // The beginning of the slots, located at `SlotOffset()` bytes after
-  // `control`. May be null for empty tables.
-  void* slots_ = nullptr;
+  // We can't assert that SOO is enabled because we don't have SooEnabled(), but
+  // we assert what we can.
+  void AssertInSooMode() const {
+    assert(capacity() == SooCapacity());
+    assert(!has_infoz());
+  }
 
   // The number of slots in the backing array. This is always 2^N-1 for an
   // integer N. NOTE: we tried experimenting with compressing the capacity and
@@ -1154,10 +1464,16 @@
   // power (N in 2^N-1), and (b) storing 2^N as the most significant bit of
   // size_ and storing size in the low bits. Both of these experiments were
   // regressions, presumably because we need capacity to do find operations.
-  size_t capacity_ = 0;
+  size_t capacity_;
 
   // The size and also has one bit that stores whether we have infoz.
-  size_t size_ = 0;
+  // TODO(b/289225379): we could put size_ into HeapOrSoo and make capacity_
+  // encode the size in SOO case. We would be making size()/capacity() more
+  // expensive in order to have more SOO space.
+  size_t size_;
+
+  // Either the control/slots pointers or the SOO slot.
+  HeapOrSoo heap_or_soo_;
 };
 
 template <class Policy, class Hash, class Eq, class Alloc>
@@ -1320,6 +1636,10 @@
                                       const void* const& slot_b) {
   // If either control byte is null, then we can't tell.
   if (ctrl_a == nullptr || ctrl_b == nullptr) return true;
+  const bool a_is_soo = IsSooControl(ctrl_a);
+  if (a_is_soo != IsSooControl(ctrl_b)) return false;
+  if (a_is_soo) return slot_a == slot_b;
+
   const void* low_slot = slot_a;
   const void* hi_slot = slot_b;
   if (ctrl_a > ctrl_b) {
@@ -1343,41 +1663,45 @@
   // - use `ABSL_PREDICT_FALSE()` to provide a compiler hint for code layout
   // - use `ABSL_RAW_LOG()` with a format string to reduce code size and improve
   //   the chances that the hot paths will be inlined.
+
+  // fail_if(is_invalid, message) crashes when is_invalid is true and provides
+  // an error message based on `message`.
+  const auto fail_if = [](bool is_invalid, const char* message) {
+    if (ABSL_PREDICT_FALSE(is_invalid)) {
+      ABSL_RAW_LOG(FATAL, "Invalid iterator comparison. %s", message);
+    }
+  };
+
   const bool a_is_default = ctrl_a == EmptyGroup();
   const bool b_is_default = ctrl_b == EmptyGroup();
-  if (ABSL_PREDICT_FALSE(a_is_default != b_is_default)) {
-    ABSL_RAW_LOG(
-        FATAL,
-        "Invalid iterator comparison. Comparing default-constructed iterator "
-        "with non-default-constructed iterator.");
-  }
   if (a_is_default && b_is_default) return;
+  fail_if(a_is_default != b_is_default,
+          "Comparing default-constructed hashtable iterator with a "
+          "non-default-constructed hashtable iterator.");
 
   if (SwisstableGenerationsEnabled()) {
     if (ABSL_PREDICT_TRUE(generation_ptr_a == generation_ptr_b)) return;
+    // Users don't need to know whether the tables are SOO, so don't mention
+    // SOO in the debug message.
+    const bool a_is_soo = IsSooControl(ctrl_a);
+    const bool b_is_soo = IsSooControl(ctrl_b);
+    fail_if(a_is_soo != b_is_soo || (a_is_soo && b_is_soo),
+            "Comparing iterators from different hashtables.");
+
     const bool a_is_empty = IsEmptyGeneration(generation_ptr_a);
     const bool b_is_empty = IsEmptyGeneration(generation_ptr_b);
-    if (a_is_empty != b_is_empty) {
-      ABSL_RAW_LOG(FATAL,
-                   "Invalid iterator comparison. Comparing iterator from a "
-                   "non-empty hashtable with an iterator from an empty "
-                   "hashtable.");
-    }
-    if (a_is_empty && b_is_empty) {
-      ABSL_RAW_LOG(FATAL,
-                   "Invalid iterator comparison. Comparing iterators from "
-                   "different empty hashtables.");
-    }
+    fail_if(a_is_empty != b_is_empty,
+            "Comparing an iterator from an empty hashtable with an iterator "
+            "from a non-empty hashtable.");
+    fail_if(a_is_empty && b_is_empty,
+            "Comparing iterators from different empty hashtables.");
+
     const bool a_is_end = ctrl_a == nullptr;
     const bool b_is_end = ctrl_b == nullptr;
-    if (a_is_end || b_is_end) {
-      ABSL_RAW_LOG(FATAL,
-                   "Invalid iterator comparison. Comparing iterator with an "
-                   "end() iterator from a different hashtable.");
-    }
-    ABSL_RAW_LOG(FATAL,
-                 "Invalid iterator comparison. Comparing non-end() iterators "
-                 "from different hashtables.");
+    fail_if(a_is_end || b_is_end,
+            "Comparing iterator with an end() iterator from a different "
+            "hashtable.");
+    fail_if(true, "Comparing non-end() iterators from different hashtables.");
   } else {
     ABSL_HARDENING_ASSERT(
         AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) &&
@@ -1432,20 +1756,17 @@
 inline FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
   auto seq = probe(common, hash);
   const ctrl_t* ctrl = common.control();
+  if (IsEmptyOrDeleted(ctrl[seq.offset()]) &&
+      !ShouldInsertBackwards(common.capacity(), hash, ctrl)) {
+    return {seq.offset(), /*probe_length=*/0};
+  }
   while (true) {
-    GroupEmptyOrDeleted g{ctrl + seq.offset()};
+    GroupFullEmptyOrDeleted g{ctrl + seq.offset()};
     auto mask = g.MaskEmptyOrDeleted();
     if (mask) {
-#if !defined(NDEBUG)
-      // We want to add entropy even when ASLR is not enabled.
-      // In debug build we will randomly insert in either the front or back of
-      // the group.
-      // TODO(kfm,sbenza): revisit after we do unconditional mixing
-      if (!is_small(common.capacity()) && ShouldInsertBackwards(hash, ctrl)) {
-        return {seq.offset(mask.HighestBitSet()), seq.index()};
-      }
-#endif
-      return {seq.offset(mask.LowestBitSet()), seq.index()};
+      return {
+          seq.offset(GetInsertionOffset(mask, common.capacity(), hash, ctrl)),
+          seq.index()};
     }
     seq.next();
     assert(seq.index() <= common.capacity() && "full table!");
@@ -1462,7 +1783,8 @@
 FindInfo find_first_non_full_outofline(const CommonFields&, size_t);
 
 inline void ResetGrowthLeft(CommonFields& common) {
-  common.set_growth_left(CapacityToGrowth(common.capacity()) - common.size());
+  common.growth_info().InitGrowthLeftNoDeleted(
+      CapacityToGrowth(common.capacity()) - common.size());
 }
 
 // Sets `ctrl` to `{kEmpty, kSentinel, ..., kEmpty}`, marking the entire
@@ -1476,43 +1798,140 @@
   SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity);
 }
 
-// Sets `ctrl[i]` to `h`.
-//
-// Unlike setting it directly, this function will perform bounds checks and
-// mirror the value to the cloned tail if necessary.
-inline void SetCtrl(const CommonFields& common, size_t i, ctrl_t h,
-                    size_t slot_size) {
-  const size_t capacity = common.capacity();
-  assert(i < capacity);
-
-  auto* slot_i = static_cast<const char*>(common.slot_array()) + i * slot_size;
+// Sets sanitizer poisoning for slot corresponding to control byte being set.
+inline void DoSanitizeOnSetCtrl(const CommonFields& c, size_t i, ctrl_t h,
+                                size_t slot_size) {
+  assert(i < c.capacity());
+  auto* slot_i = static_cast<const char*>(c.slot_array()) + i * slot_size;
   if (IsFull(h)) {
     SanitizerUnpoisonMemoryRegion(slot_i, slot_size);
   } else {
     SanitizerPoisonMemoryRegion(slot_i, slot_size);
   }
-
-  ctrl_t* ctrl = common.control();
-  ctrl[i] = h;
-  ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h;
 }
 
-// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
-inline void SetCtrl(const CommonFields& common, size_t i, h2_t h,
+// Sets `ctrl[i]` to `h`.
+//
+// Unlike setting it directly, this function will perform bounds checks and
+// mirror the value to the cloned tail if necessary.
+inline void SetCtrl(const CommonFields& c, size_t i, ctrl_t h,
                     size_t slot_size) {
-  SetCtrl(common, i, static_cast<ctrl_t>(h), slot_size);
+  DoSanitizeOnSetCtrl(c, i, h, slot_size);
+  ctrl_t* ctrl = c.control();
+  ctrl[i] = h;
+  ctrl[((i - NumClonedBytes()) & c.capacity()) +
+       (NumClonedBytes() & c.capacity())] = h;
+}
+// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
+inline void SetCtrl(const CommonFields& c, size_t i, h2_t h, size_t slot_size) {
+  SetCtrl(c, i, static_cast<ctrl_t>(h), slot_size);
 }
 
-// growth_left (which is a size_t) is stored with the backing array.
+// Like SetCtrl, but in a single group table, we can save some operations when
+// setting the cloned control byte.
+inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, ctrl_t h,
+                                      size_t slot_size) {
+  assert(is_single_group(c.capacity()));
+  DoSanitizeOnSetCtrl(c, i, h, slot_size);
+  ctrl_t* ctrl = c.control();
+  ctrl[i] = h;
+  ctrl[i + c.capacity() + 1] = h;
+}
+// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
+inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, h2_t h,
+                                      size_t slot_size) {
+  SetCtrlInSingleGroupTable(c, i, static_cast<ctrl_t>(h), slot_size);
+}
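Both SetCtrl overloads rely on the usual swisstable layout, assuming the first Group::kWidth - 1 control bytes are cloned after the sentinel at index `capacity`. A hedged sketch of the branch-free mirror-index arithmetic used above, with illustrative constants (kNumCloned stands in for NumClonedBytes()):

#include <cassert>
#include <cstddef>

constexpr std::size_t kNumCloned = 15;  // Group::kWidth - 1 for a 16-wide group

std::size_t MirrorIndex(std::size_t i, std::size_t capacity) {
  // capacity is 2^N - 1, so `& capacity` is the same as `% (capacity + 1)`.
  return ((i - kNumCloned) & capacity) + (kNumCloned & capacity);
}

void CheckMirrorIndex() {
  // A byte that has a clone (i < kNumCloned) maps to the cloned slot located
  // right after the sentinel at index `capacity`.
  assert(MirrorIndex(/*i=*/7, /*capacity=*/31) == 7 + 31 + 1);
  assert(MirrorIndex(/*i=*/14, /*capacity=*/15) == 14 + 15 + 1);
  // A byte with no clone (i >= kNumCloned) maps back onto itself, so the
  // second store in SetCtrl just rewrites ctrl[i].
  assert(MirrorIndex(/*i=*/20, /*capacity=*/31) == 20);
}

The single-group variant can skip this arithmetic because every written byte has a clone at the fixed offset `i + capacity + 1`.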
+
+// growth_info (which is a size_t) is stored with the backing array.
 constexpr size_t BackingArrayAlignment(size_t align_of_slot) {
-  return (std::max)(align_of_slot, alignof(size_t));
+  return (std::max)(align_of_slot, alignof(GrowthInfo));
 }
 
 // Returns the address of the ith slot in slots where each slot occupies
 // slot_size.
 inline void* SlotAddress(void* slot_array, size_t slot, size_t slot_size) {
-  return reinterpret_cast<void*>(reinterpret_cast<char*>(slot_array) +
-                                 (slot * slot_size));
+  return static_cast<void*>(static_cast<char*>(slot_array) +
+                            (slot * slot_size));
+}
+
+// Iterates over all full slots and calls `cb(const ctrl_t*, SlotType*)`.
+// No insertion into the table is allowed while the callback runs.
+// Erasure is allowed only for the element passed to the callback.
+template <class SlotType, class Callback>
+ABSL_ATTRIBUTE_ALWAYS_INLINE inline void IterateOverFullSlots(
+    const CommonFields& c, SlotType* slot, Callback cb) {
+  const size_t cap = c.capacity();
+  const ctrl_t* ctrl = c.control();
+  if (is_small(cap)) {
+    // Mirrored/cloned control bytes in a small table are also located in the
+    // first group (starting from position 0). We take the group from position
+    // `capacity` in order to avoid duplicates.
+
+    // A small table's capacity fits into a portable group, where
+    // GroupPortableImpl::MaskFull is more efficient when
+    // capacity <= GroupPortableImpl::kWidth.
+    assert(cap <= GroupPortableImpl::kWidth &&
+           "unexpectedly large small capacity");
+    static_assert(Group::kWidth >= GroupPortableImpl::kWidth,
+                  "unexpected group width");
+    // Group starts from kSentinel slot, so indices in the mask will
+    // be increased by 1.
+    const auto mask = GroupPortableImpl(ctrl + cap).MaskFull();
+    --ctrl;
+    --slot;
+    for (uint32_t i : mask) {
+      cb(ctrl + i, slot + i);
+    }
+    return;
+  }
+  size_t remaining = c.size();
+  ABSL_ATTRIBUTE_UNUSED const size_t original_size_for_assert = remaining;
+  while (remaining != 0) {
+    for (uint32_t i : GroupFullEmptyOrDeleted(ctrl).MaskFull()) {
+      assert(IsFull(ctrl[i]) && "hash table was modified unexpectedly");
+      cb(ctrl + i, slot + i);
+      --remaining;
+    }
+    ctrl += Group::kWidth;
+    slot += Group::kWidth;
+    assert((remaining == 0 || *(ctrl - 1) != ctrl_t::kSentinel) &&
+           "hash table was modified unexpectedly");
+  }
+  // NOTE: erasure of the current element is allowed in the callback for the
+  // absl::erase_if specialization, so we use `>=`.
+  assert(original_size_for_assert >= c.size() &&
+         "hash table was modified unexpectedly");
+}
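IterateOverFullSlots walks the table one group at a time, masking out the full positions and stopping once `size()` elements have been seen. A simplified, portable sketch of that walk, assuming the usual convention that a control byte with the high bit clear is full; the 8-byte group width and all names here are illustrative, not the real Group machinery:

#include <cstddef>
#include <cstdint>

constexpr std::size_t kGroupWidth = 8;

// Requires: `ctrl` holds exactly `num_full` bytes with the high bit clear,
// padded out to a multiple of kGroupWidth with bytes that have the high bit
// set (e.g. 0x80), and `slots[i]` corresponds to `ctrl[i]`.
template <class SlotType, class Callback>
void VisitFullSlots(const std::uint8_t* ctrl, SlotType* slots,
                    std::size_t num_full, Callback cb) {
  std::size_t remaining = num_full;
  while (remaining != 0) {
    // Build a bitmask of full positions within the current group, standing in
    // for Group::MaskFull().
    std::uint32_t mask = 0;
    for (std::size_t j = 0; j < kGroupWidth; ++j) {
      if ((ctrl[j] & 0x80) == 0) mask |= std::uint32_t{1} << j;
    }
    // Visit every full position found in this group.
    for (std::size_t j = 0; j < kGroupWidth; ++j) {
      if (mask & (std::uint32_t{1} << j)) {
        cb(ctrl + j, slots + j);
        --remaining;
      }
    }
    ctrl += kGroupWidth;
    slots += kGroupWidth;
  }
}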
+
+template <typename CharAlloc>
+constexpr bool ShouldSampleHashtablezInfo() {
+  // Folks with custom allocators often make unwarranted assumptions about the
+  // behavior of their classes vis-a-vis trivial destructibility and what
+  // calls they will or won't make.  Avoid sampling for people with custom
+  // allocators to get us out of this mess.  This is not a hard guarantee but
+  // a workaround while we plan the exact guarantee we want to provide.
+  return std::is_same<CharAlloc, std::allocator<char>>::value;
+}
+
+template <bool kSooEnabled>
+HashtablezInfoHandle SampleHashtablezInfo(size_t sizeof_slot, size_t sizeof_key,
+                                          size_t sizeof_value,
+                                          size_t old_capacity, bool was_soo,
+                                          HashtablezInfoHandle forced_infoz,
+                                          CommonFields& c) {
+  if (forced_infoz.IsSampled()) return forced_infoz;
+  // In SOO mode, we sample on the first insertion, so if this is an empty SOO
+  // case (e.g. when reserve is called), we still need to sample.
+  if (kSooEnabled && was_soo && c.size() == 0) {
+    return Sample(sizeof_slot, sizeof_key, sizeof_value, SooCapacity());
+  }
+  // For non-SOO cases, we sample whenever the capacity is increasing from zero
+  // to non-zero.
+  if (!kSooEnabled && old_capacity == 0) {
+    return Sample(sizeof_slot, sizeof_key, sizeof_value, 0);
+  }
+  return c.infoz();
 }
 
 // Helper class to perform resize of the hash set.
@@ -1521,17 +1940,21 @@
 // See GrowIntoSingleGroupShuffleControlBytes for details.
 class HashSetResizeHelper {
  public:
-  explicit HashSetResizeHelper(CommonFields& c)
-      : old_ctrl_(c.control()),
-        old_capacity_(c.capacity()),
-        had_infoz_(c.has_infoz()) {}
+  explicit HashSetResizeHelper(CommonFields& c, bool was_soo, bool had_soo_slot,
+                               HashtablezInfoHandle forced_infoz)
+      : old_capacity_(c.capacity()),
+        had_infoz_(c.has_infoz()),
+        was_soo_(was_soo),
+        had_soo_slot_(had_soo_slot),
+        forced_infoz_(forced_infoz) {}
 
-  // Optimized for small groups version of `find_first_non_full` applicable
-  // only right after calling `raw_hash_set::resize`.
+  // Optimized-for-small-groups version of `find_first_non_full`.
+  // Beneficial only right after calling `raw_hash_set::resize`.
+  // It is safe to call when the capacity is large or was not changed, but
+  // there will be no performance benefit.
   // It has implicit assumption that `resize` will call
   // `GrowSizeIntoSingleGroup*` in case `IsGrowingIntoSingleGroupApplicable`.
-  // Falls back to `find_first_non_full` in case of big groups, so it is
-  // safe to use after `rehash_and_grow_if_necessary`.
+  // Falls back to `find_first_non_full` in case of big groups.
   static FindInfo FindFirstNonFullAfterResize(const CommonFields& c,
                                               size_t old_capacity,
                                               size_t hash) {
@@ -1553,14 +1976,30 @@
     return FindInfo{offset, 0};
   }
 
-  ctrl_t* old_ctrl() const { return old_ctrl_; }
+  HeapOrSoo& old_heap_or_soo() { return old_heap_or_soo_; }
+  void* old_soo_data() { return old_heap_or_soo_.get_soo_data(); }
+  ctrl_t* old_ctrl() const {
+    assert(!was_soo_);
+    return old_heap_or_soo_.control();
+  }
+  void* old_slots() const {
+    assert(!was_soo_);
+    return old_heap_or_soo_.slot_array().get();
+  }
   size_t old_capacity() const { return old_capacity_; }
 
+  // Returns the index of the SOO slot when growing from SOO to non-SOO in a
+  // single group. See also InitControlBytesAfterSoo(). It's important to use
+  // index 1 so that when resizing from capacity 1 to 3, we can still have
+  // random iteration order between the first two inserted elements.
+  // I.e. it allows inserting the second element at either index 0 or 2.
+  static size_t SooSlotIndex() { return 1; }
+
   // Allocates a backing array for the hashtable.
   // Reads `capacity` and updates all other fields based on the result of
   // the allocation.
   //
-  // It also may do the folowing actions:
+  // It also may do the following actions:
   // 1. initialize control bytes
   // 2. initialize slots
   // 3. deallocate old slots.
@@ -1590,45 +2029,45 @@
   //
   //  Returns IsGrowingIntoSingleGroupApplicable result to avoid recomputation.
   template <typename Alloc, size_t SizeOfSlot, bool TransferUsesMemcpy,
-            size_t AlignOfSlot>
-  ABSL_ATTRIBUTE_NOINLINE bool InitializeSlots(CommonFields& c, void* old_slots,
-                                               Alloc alloc) {
+            bool SooEnabled, size_t AlignOfSlot>
+  ABSL_ATTRIBUTE_NOINLINE bool InitializeSlots(CommonFields& c, Alloc alloc,
+                                               ctrl_t soo_slot_h2,
+                                               size_t key_size,
+                                               size_t value_size) {
     assert(c.capacity());
-    // Folks with custom allocators often make unwarranted assumptions about the
-    // behavior of their classes vis-a-vis trivial destructability and what
-    // calls they will or won't make.  Avoid sampling for people with custom
-    // allocators to get us out of this mess.  This is not a hard guarantee but
-    // a workaround while we plan the exact guarantee we want to provide.
-    const size_t sample_size =
-        (std::is_same<Alloc, std::allocator<char>>::value &&
-         c.slot_array() == nullptr)
-            ? SizeOfSlot
-            : 0;
     HashtablezInfoHandle infoz =
-        sample_size > 0 ? Sample(sample_size) : c.infoz();
+        ShouldSampleHashtablezInfo<Alloc>()
+            ? SampleHashtablezInfo<SooEnabled>(SizeOfSlot, key_size, value_size,
+                                               old_capacity_, was_soo_,
+                                               forced_infoz_, c)
+            : HashtablezInfoHandle{};
 
     const bool has_infoz = infoz.IsSampled();
-    const size_t cap = c.capacity();
-    const size_t alloc_size =
-        AllocSize(cap, SizeOfSlot, AlignOfSlot, has_infoz);
-    char* mem = static_cast<char*>(
-        Allocate<BackingArrayAlignment(AlignOfSlot)>(&alloc, alloc_size));
+    RawHashSetLayout layout(c.capacity(), AlignOfSlot, has_infoz);
+    char* mem = static_cast<char*>(Allocate<BackingArrayAlignment(AlignOfSlot)>(
+        &alloc, layout.alloc_size(SizeOfSlot)));
     const GenerationType old_generation = c.generation();
-    c.set_generation_ptr(reinterpret_cast<GenerationType*>(
-        mem + GenerationOffset(cap, has_infoz)));
+    c.set_generation_ptr(
+        reinterpret_cast<GenerationType*>(mem + layout.generation_offset()));
     c.set_generation(NextGeneration(old_generation));
-    c.set_control(reinterpret_cast<ctrl_t*>(mem + ControlOffset(has_infoz)));
-    c.set_slots(mem + SlotOffset(cap, AlignOfSlot, has_infoz));
+    c.set_control(reinterpret_cast<ctrl_t*>(mem + layout.control_offset()));
+    c.set_slots(mem + layout.slot_offset());
     ResetGrowthLeft(c);
 
     const bool grow_single_group =
-        IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity());
-    if (old_capacity_ != 0 && grow_single_group) {
+        IsGrowingIntoSingleGroupApplicable(old_capacity_, layout.capacity());
+    if (SooEnabled && was_soo_ && grow_single_group) {
+      InitControlBytesAfterSoo(c.control(), soo_slot_h2, layout.capacity());
+      if (TransferUsesMemcpy && had_soo_slot_) {
+        TransferSlotAfterSoo(c, SizeOfSlot);
+      }
+      // SooEnabled implies that old_capacity_ != 0.
+    } else if ((SooEnabled || old_capacity_ != 0) && grow_single_group) {
       if (TransferUsesMemcpy) {
-        GrowSizeIntoSingleGroupTransferable(c, old_slots, SizeOfSlot);
-        DeallocateOld<AlignOfSlot>(alloc, SizeOfSlot, old_slots);
+        GrowSizeIntoSingleGroupTransferable(c, SizeOfSlot);
+        DeallocateOld<AlignOfSlot>(alloc, SizeOfSlot);
       } else {
-        GrowIntoSingleGroupShuffleControlBytes(c.control(), c.capacity());
+        GrowIntoSingleGroupShuffleControlBytes(c.control(), layout.capacity());
       }
     } else {
       ResetCtrl(c, SizeOfSlot);
@@ -1636,8 +2075,8 @@
 
     c.set_has_infoz(has_infoz);
     if (has_infoz) {
-      infoz.RecordStorageChanged(c.size(), cap);
-      if (grow_single_group || old_capacity_ == 0) {
+      infoz.RecordStorageChanged(c.size(), layout.capacity());
+      if ((SooEnabled && was_soo_) || grow_single_group || old_capacity_ == 0) {
         infoz.RecordRehash(0);
       }
       c.set_infoz(infoz);
@@ -1651,21 +2090,22 @@
   // PRECONDITIONS:
   // 1. GrowIntoSingleGroupShuffleControlBytes was already called.
   template <class PolicyTraits, class Alloc>
-  void GrowSizeIntoSingleGroup(CommonFields& c, Alloc& alloc_ref,
-                               typename PolicyTraits::slot_type* old_slots) {
+  void GrowSizeIntoSingleGroup(CommonFields& c, Alloc& alloc_ref) {
     assert(old_capacity_ < Group::kWidth / 2);
     assert(IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity()));
     using slot_type = typename PolicyTraits::slot_type;
     assert(is_single_group(c.capacity()));
 
-    auto* new_slots = reinterpret_cast<slot_type*>(c.slot_array());
+    auto* new_slots = static_cast<slot_type*>(c.slot_array());
+    auto* old_slots_ptr = static_cast<slot_type*>(old_slots());
 
     size_t shuffle_bit = old_capacity_ / 2 + 1;
     for (size_t i = 0; i < old_capacity_; ++i) {
-      if (IsFull(old_ctrl_[i])) {
+      if (IsFull(old_ctrl()[i])) {
         size_t new_i = i ^ shuffle_bit;
         SanitizerUnpoisonMemoryRegion(new_slots + new_i, sizeof(slot_type));
-        PolicyTraits::transfer(&alloc_ref, new_slots + new_i, old_slots + i);
+        PolicyTraits::transfer(&alloc_ref, new_slots + new_i,
+                               old_slots_ptr + i);
       }
     }
     PoisonSingleGroupEmptySlots(c, sizeof(slot_type));
@@ -1673,11 +2113,12 @@
 
   // Deallocates old backing array.
   template <size_t AlignOfSlot, class CharAlloc>
-  void DeallocateOld(CharAlloc alloc_ref, size_t slot_size, void* old_slots) {
-    SanitizerUnpoisonMemoryRegion(old_slots, slot_size * old_capacity_);
+  void DeallocateOld(CharAlloc alloc_ref, size_t slot_size) {
+    SanitizerUnpoisonMemoryRegion(old_slots(), slot_size * old_capacity_);
+    auto layout = RawHashSetLayout(old_capacity_, AlignOfSlot, had_infoz_);
     Deallocate<BackingArrayAlignment(AlignOfSlot)>(
-        &alloc_ref, old_ctrl_ - ControlOffset(had_infoz_),
-        AllocSize(old_capacity_, slot_size, AlignOfSlot, had_infoz_));
+        &alloc_ref, old_ctrl() - layout.control_offset(),
+        layout.alloc_size(slot_size));
   }
 
  private:
@@ -1692,8 +2133,12 @@
   // Relocates control bytes and slots into new single group for
   // transferable objects.
   // Must be called only if IsGrowingIntoSingleGroupApplicable returned true.
-  void GrowSizeIntoSingleGroupTransferable(CommonFields& c, void* old_slots,
-                                           size_t slot_size);
+  void GrowSizeIntoSingleGroupTransferable(CommonFields& c, size_t slot_size);
+
+  // If there was an SOO slot and slots are transferable, transfers the SOO slot
+  // into the new heap allocation. Must be called only if
+  // IsGrowingIntoSingleGroupApplicable returned true.
+  void TransferSlotAfterSoo(CommonFields& c, size_t slot_size);
 
   // Shuffle control bits deterministically to the next capacity.
   // Returns offset for newly added element with given hash.
@@ -1726,6 +2171,13 @@
   void GrowIntoSingleGroupShuffleControlBytes(ctrl_t* new_ctrl,
                                               size_t new_capacity) const;
 
+  // If the table was SOO, initializes new control bytes. `h2` is the control
+  // byte corresponding to the full slot. Must be called only if
+  // IsGrowingIntoSingleGroupApplicable returned true.
+  // Requires: `had_soo_slot_ || h2 == ctrl_t::kEmpty`.
+  void InitControlBytesAfterSoo(ctrl_t* new_ctrl, ctrl_t h2,
+                                size_t new_capacity);
+
   // Shuffle trivially transferable slots in the way consistent with
   // GrowIntoSingleGroupShuffleControlBytes.
   //
@@ -1739,8 +2191,7 @@
   // 1. new_slots are transferred from old_slots_ consistent with
   //    GrowIntoSingleGroupShuffleControlBytes.
   // 2. Empty new_slots are *not* poisoned.
-  void GrowIntoSingleGroupShuffleTransferableSlots(void* old_slots,
-                                                   void* new_slots,
+  void GrowIntoSingleGroupShuffleTransferableSlots(void* new_slots,
                                                    size_t slot_size) const;
 
   // Poison empty slots that were transferred using the deterministic algorithm
@@ -1760,11 +2211,24 @@
     }
   }
 
-  ctrl_t* old_ctrl_;
+  HeapOrSoo old_heap_or_soo_;
   size_t old_capacity_;
   bool had_infoz_;
+  bool was_soo_;
+  bool had_soo_slot_;
+  // Either null infoz or a pre-sampled forced infoz for SOO tables.
+  HashtablezInfoHandle forced_infoz_;
 };
 
+inline void PrepareInsertCommon(CommonFields& common) {
+  common.increment_size();
+  common.maybe_increment_generation_on_insert();
+}
+
+// Like prepare_insert, but for the case of inserting into a full SOO table.
+size_t PrepareInsertAfterSoo(size_t hash, size_t slot_size,
+                             CommonFields& common);
+
 // PolicyFunctions bundles together some information for a particular
 // raw_hash_set<T, ...> instantiation. This information is passed to
 // type-erased functions that want to do small amounts of type-specific
@@ -1772,21 +2236,29 @@
 struct PolicyFunctions {
   size_t slot_size;
 
-  // Returns the hash of the pointed-to slot.
-  size_t (*hash_slot)(void* set, void* slot);
+  // Returns the pointer to the hash function stored in the set.
+  const void* (*hash_fn)(const CommonFields& common);
 
-  // Transfer the contents of src_slot to dst_slot.
+  // Returns the hash of the pointed-to slot.
+  size_t (*hash_slot)(const void* hash_fn, void* slot);
+
+  // Transfers the contents of src_slot to dst_slot.
   void (*transfer)(void* set, void* dst_slot, void* src_slot);
 
-  // Deallocate the backing store from common.
+  // Deallocates the backing store from common.
   void (*dealloc)(CommonFields& common, const PolicyFunctions& policy);
+
+  // Resizes set to the new capacity.
+  // Arguments are used as in raw_hash_set::resize_impl.
+  void (*resize)(CommonFields& common, size_t new_capacity,
+                 HashtablezInfoHandle forced_infoz);
 };
 
 // ClearBackingArray clears the backing array, either modifying it in place,
 // or creating a new one based on the value of "reuse".
 // REQUIRES: c.capacity > 0
 void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
-                       bool reuse);
+                       bool reuse, bool soo_enabled);
 
 // Type-erased version of raw_hash_set::erase_meta_only.
 void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size);
@@ -1817,9 +2289,26 @@
   memcpy(dst, src, SizeOfSlot);
 }
 
-// Type-erased version of raw_hash_set::drop_deletes_without_resize.
-void DropDeletesWithoutResize(CommonFields& common,
-                              const PolicyFunctions& policy, void* tmp_space);
+// Type erased raw_hash_set::get_hash_ref_fn for the empty hash function case.
+const void* GetHashRefForEmptyHasher(const CommonFields& common);
+
+// Given the hash of a value not currently in the table and the first empty
+// slot in the probe sequence, finds a viable slot index to insert it at.
+//
+// If there is no space left, the table is resized or rehashed
+// (for tables with deleted slots, see FindInsertPositionWithGrowthOrRehash).
+//
+// If there are no deleted slots and growth_left is positive, the element can
+// be inserted at the provided `target` position.
+//
+// When the table has deleted slots (according to GrowthInfo), the target
+// position will be searched one more time using `find_first_non_full`.
+//
+// REQUIRES: Table is not SOO.
+// REQUIRES: At least one non-full slot available.
+// REQUIRES: `target` is a valid empty position to insert.
+size_t PrepareInsertNonSoo(CommonFields& common, size_t hash, FindInfo target,
+                           const PolicyFunctions& policy);
 
 // A SwissTable.
 //
@@ -1875,6 +2364,26 @@
   using key_arg = typename KeyArgImpl::template type<K, key_type>;
 
  private:
+  // TODO(b/289225379): we could add extra SOO space inside raw_hash_set
+  // after CommonFields to allow inlining larger slot_types (e.g. std::string),
+  // but it's a bit complicated if we want to support incomplete mapped_type in
+  // flat_hash_map. We could potentially do this for flat_hash_set and for an
+  // allowlist of `mapped_type`s of flat_hash_map that includes e.g. arithmetic
+  // types, strings, cords, and pairs/tuples of allowlisted types.
+  constexpr static bool SooEnabled() {
+    return PolicyTraits::soo_enabled() &&
+           sizeof(slot_type) <= sizeof(HeapOrSoo) &&
+           alignof(slot_type) <= alignof(HeapOrSoo);
+  }
+
+  // Whether `size` fits in the SOO capacity of this table.
+  bool fits_in_soo(size_t size) const {
+    return SooEnabled() && size <= SooCapacity();
+  }
+  // Whether this table is in SOO mode or non-SOO mode.
+  bool is_soo() const { return fits_in_soo(capacity()); }
+  bool is_full_soo() const { return is_soo() && !empty(); }
+
   // Give an early error when key_type is not hashable/eq.
   auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k));
   auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k));
@@ -1928,6 +2437,7 @@
 
   class iterator : private HashSetIteratorGenerationInfo {
     friend class raw_hash_set;
+    friend struct HashtableFreeFunctionsAccess;
 
    public:
     using iterator_category = std::forward_iterator_tag;
@@ -1958,6 +2468,7 @@
       ++ctrl_;
       ++slot_;
       skip_empty_or_deleted();
+      if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr;
       return *this;
     }
     // PRECONDITION: not an end() iterator.
@@ -1988,22 +2499,31 @@
       // not equal to any end iterator.
       ABSL_ASSUME(ctrl != nullptr);
     }
+    // This constructor is used in begin() to avoid an MSan
+    // use-of-uninitialized-value error. Delegating from this constructor to
+    // the previous one doesn't avoid the error.
+    iterator(ctrl_t* ctrl, MaybeInitializedPtr slot,
+             const GenerationType* generation_ptr)
+        : HashSetIteratorGenerationInfo(generation_ptr),
+          ctrl_(ctrl),
+          slot_(to_slot(slot.get())) {
+      // This assumption helps the compiler know that any non-end iterator is
+      // not equal to any end iterator.
+      ABSL_ASSUME(ctrl != nullptr);
+    }
     // For end() iterators.
     explicit iterator(const GenerationType* generation_ptr)
         : HashSetIteratorGenerationInfo(generation_ptr), ctrl_(nullptr) {}
 
-    // Fixes up `ctrl_` to point to a full by advancing it and `slot_` until
-    // they reach one.
-    //
-    // If a sentinel is reached, we null `ctrl_` out instead.
+    // Fixes up `ctrl_` to point to a full slot or the sentinel by advancing
+    // `ctrl_` and `slot_` until they reach one.
     void skip_empty_or_deleted() {
       while (IsEmptyOrDeleted(*ctrl_)) {
         uint32_t shift =
-            GroupEmptyOrDeleted{ctrl_}.CountLeadingEmptyOrDeleted();
+            GroupFullEmptyOrDeleted{ctrl_}.CountLeadingEmptyOrDeleted();
         ctrl_ += shift;
         slot_ += shift;
       }
-      if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr;
     }
 
     ctrl_t* control() const { return ctrl_; }
@@ -2091,8 +2611,9 @@
       size_t bucket_count, const hasher& hash = hasher(),
       const key_equal& eq = key_equal(),
       const allocator_type& alloc = allocator_type())
-      : settings_(CommonFields{}, hash, eq, alloc) {
-    if (bucket_count) {
+      : settings_(CommonFields::CreateDefault<SooEnabled()>(), hash, eq,
+                  alloc) {
+    if (bucket_count > (SooEnabled() ? SooCapacity() : 0)) {
       resize(NormalizeCapacity(bucket_count));
     }
   }
@@ -2193,22 +2714,69 @@
                                that.alloc_ref())) {}
 
   raw_hash_set(const raw_hash_set& that, const allocator_type& a)
-      : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) {
+      : raw_hash_set(GrowthToLowerboundCapacity(that.size()), that.hash_ref(),
+                     that.eq_ref(), a) {
     const size_t size = that.size();
-    if (size == 0) return;
-    reserve(size);
-    // Because the table is guaranteed to be empty, we can do something faster
-    // than a full `insert`.
-    for (const auto& v : that) {
-      const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
-      auto target = find_first_non_full_outofline(common(), hash);
-      SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type));
-      emplace_at(target.offset, v);
-      common().maybe_increment_generation_on_insert();
-      infoz().RecordInsert(hash, target.probe_length);
+    if (size == 0) {
+      return;
+    }
+    // We don't use `that.is_soo()` here because `that` can have non-SOO
+    // capacity but have a size that fits into SOO capacity.
+    if (fits_in_soo(size)) {
+      assert(size == 1);
+      common().set_full_soo();
+      emplace_at(soo_iterator(), *that.begin());
+      const HashtablezInfoHandle infoz = try_sample_soo();
+      if (infoz.IsSampled()) resize_with_soo_infoz(infoz);
+      return;
+    }
+    assert(!that.is_soo());
+    const size_t cap = capacity();
+    // Note about single group tables:
+    // 1. Any order of elements is correct.
+    // 2. The order has to be non-deterministic.
+    // 3. We assign elements with an arbitrary `shift`, starting from position
+    //    `capacity + shift`.
+    // 4. `shift` must be coprime with `capacity + 1` so that modular arithmetic
+    //    traverses all positions instead of cycling through a subset of them.
+    //    Odd numbers are coprime with any `capacity + 1` (2^N).
+    size_t offset = cap;
+    const size_t shift =
+        is_single_group(cap) ? (PerTableSalt(control()) | 1) : 0;
+    IterateOverFullSlots(
+        that.common(), that.slot_array(),
+        [&](const ctrl_t* that_ctrl,
+            slot_type* that_slot) ABSL_ATTRIBUTE_ALWAYS_INLINE {
+          if (shift == 0) {
+            // Big tables case. Position must be searched via probing.
+            // The table is guaranteed to be empty, so we can do faster than
+            // a full `insert`.
+            const size_t hash = PolicyTraits::apply(
+                HashElement{hash_ref()}, PolicyTraits::element(that_slot));
+            FindInfo target = find_first_non_full_outofline(common(), hash);
+            infoz().RecordInsert(hash, target.probe_length);
+            offset = target.offset;
+          } else {
+            // Small tables case. Next position is computed via shift.
+            offset = (offset + shift) & cap;
+          }
+          const h2_t h2 = static_cast<h2_t>(*that_ctrl);
+          assert(  // We rely on the hash not changing for small tables.
+              H2(PolicyTraits::apply(HashElement{hash_ref()},
+                                     PolicyTraits::element(that_slot))) == h2 &&
+              "hash function value changed unexpectedly during the copy");
+          SetCtrl(common(), offset, h2, sizeof(slot_type));
+          emplace_at(iterator_at(offset), PolicyTraits::element(that_slot));
+          common().maybe_increment_generation_on_insert();
+        });
+    if (shift != 0) {
+      // On small table copy we do not record individual inserts.
+      // RecordInsert requires hash, but it is unknown for small tables.
+      infoz().RecordStorageChanged(size, cap);
     }
     common().set_size(size);
-    set_growth_left(growth_left() - size);
+    growth_info().OverwriteManyEmptyAsFull(size);
   }
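The coprime-shift claim in the comment above is easy to check directly: stepping by any odd `shift` modulo `capacity + 1` (a power of two) visits every position exactly once before repeating. A small hedged sketch, separate from the patch and using only standard C++:

#include <cassert>
#include <cstddef>
#include <vector>

// Returns true if `(offset + shift) & capacity`, starting from offset ==
// capacity as in the copy constructor above, enumerates all `capacity + 1`
// positions exactly once.
bool OddShiftVisitsAllPositions(std::size_t capacity, std::size_t shift) {
  assert((capacity & (capacity + 1)) == 0);  // capacity is 2^N - 1
  assert(shift % 2 == 1);                    // odd => coprime with 2^N
  std::vector<bool> seen(capacity + 1, false);
  std::size_t offset = capacity;
  for (std::size_t i = 0; i != capacity + 1; ++i) {
    offset = (offset + shift) & capacity;
    if (seen[offset]) return false;  // repeated a position too early
    seen[offset] = true;
  }
  return true;  // every position was visited exactly once
}

// For example, OddShiftVisitsAllPositions(7, 5) and
// OddShiftVisitsAllPositions(15, 9) both return true. An even shift would
// revisit positions before covering them all, which is why the code ORs the
// per-table salt with 1.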
 
   ABSL_ATTRIBUTE_NOINLINE raw_hash_set(raw_hash_set&& that) noexcept(
@@ -2220,16 +2788,22 @@
          // would create a nullptr functor that cannot be called.
          // TODO(b/296061262): move instead of copying hash/eq/alloc.
          // Note: we avoid using exchange for better generated code.
-        settings_(std::move(that.common()), that.hash_ref(), that.eq_ref(),
-                  that.alloc_ref()) {
-    that.common() = CommonFields{};
+        settings_(PolicyTraits::transfer_uses_memcpy() || !that.is_full_soo()
+                      ? std::move(that.common())
+                      : CommonFields{full_soo_tag_t{}},
+                  that.hash_ref(), that.eq_ref(), that.alloc_ref()) {
+    if (!PolicyTraits::transfer_uses_memcpy() && that.is_full_soo()) {
+      transfer(soo_slot(), that.soo_slot());
+    }
+    that.common() = CommonFields::CreateDefault<SooEnabled()>();
     maybe_increment_generation_or_rehash_on_move();
   }
 
   raw_hash_set(raw_hash_set&& that, const allocator_type& a)
-      : settings_(CommonFields{}, that.hash_ref(), that.eq_ref(), a) {
+      : settings_(CommonFields::CreateDefault<SooEnabled()>(), that.hash_ref(),
+                  that.eq_ref(), a) {
     if (a == that.alloc_ref()) {
-      std::swap(common(), that.common());
+      swap_common(that);
       maybe_increment_generation_or_rehash_on_move();
     } else {
       move_elements_allocs_unequal(std::move(that));
@@ -2264,8 +2838,12 @@
   ~raw_hash_set() { destructor_impl(); }
 
   iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND {
-    auto it = iterator_at(0);
+    if (ABSL_PREDICT_FALSE(empty())) return end();
+    if (is_soo()) return soo_iterator();
+    iterator it = {control(), common().slots_union(),
+                   common().generation_ptr()};
     it.skip_empty_or_deleted();
+    assert(IsFull(*it.control()));
     return it;
   }
   iterator end() ABSL_ATTRIBUTE_LIFETIME_BOUND {
@@ -2285,7 +2863,14 @@
 
   bool empty() const { return !size(); }
   size_t size() const { return common().size(); }
-  size_t capacity() const { return common().capacity(); }
+  size_t capacity() const {
+    const size_t cap = common().capacity();
+    // Compiler complains when using functions in assume so use local variables.
+    ABSL_ATTRIBUTE_UNUSED static constexpr bool kEnabled = SooEnabled();
+    ABSL_ATTRIBUTE_UNUSED static constexpr size_t kCapacity = SooCapacity();
+    ABSL_ASSUME(!kEnabled || cap >= kCapacity);
+    return cap;
+  }
   size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
 
   ABSL_ATTRIBUTE_REINITIALIZES void clear() {
@@ -2299,9 +2884,13 @@
     const size_t cap = capacity();
     if (cap == 0) {
       // Already guaranteed to be empty; so nothing to do.
+    } else if (is_soo()) {
+      if (!empty()) destroy(soo_slot());
+      common().set_empty_soo();
     } else {
       destroy_slots();
-      ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/cap < 128);
+      ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/cap < 128,
+                        SooEnabled());
     }
     common().set_reserved_growth(0);
     common().set_reservation_size(0);
@@ -2432,7 +3021,7 @@
   std::pair<iterator, bool> emplace(Args&&... args)
       ABSL_ATTRIBUTE_LIFETIME_BOUND {
     alignas(slot_type) unsigned char raw[sizeof(slot_type)];
-    slot_type* slot = reinterpret_cast<slot_type*>(&raw);
+    slot_type* slot = to_slot(&raw);
 
     construct(slot, std::forward<Args>(args)...);
     const auto& elem = PolicyTraits::element(slot);
@@ -2496,11 +3085,11 @@
                         F&& f) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     auto res = find_or_prepare_insert(key);
     if (res.second) {
-      slot_type* slot = slot_array() + res.first;
+      slot_type* slot = res.first.slot();
       std::forward<F>(f)(constructor(&alloc_ref(), &slot));
       assert(!slot);
     }
-    return iterator_at(res.first);
+    return res.first;
   }
 
   // Extension API: support for heterogeneous keys.
@@ -2524,7 +3113,7 @@
   // this method returns void to reduce algorithmic complexity to O(1).  The
   // iterator is invalidated, so any increment should be done before calling
   // erase.  In order to erase while iterating across a map, use the following
-  // idiom (which also works for standard containers):
+  // idiom (which also works for some standard containers):
   //
   // for (auto it = m.begin(), end = m.end(); it != end;) {
   //   // `erase()` will invalidate `it`, so advance `it` first.
@@ -2540,7 +3129,11 @@
   void erase(iterator it) {
     AssertIsFull(it.control(), it.generation(), it.generation_ptr(), "erase()");
     destroy(it.slot());
-    erase_meta_only(it);
+    if (is_soo()) {
+      common().set_empty_soo();
+    } else {
+      erase_meta_only(it);
+    }
   }
 
   iterator erase(const_iterator first,
@@ -2548,12 +3141,19 @@
     // We check for empty first because ClearBackingArray requires that
     // capacity() > 0 as a precondition.
     if (empty()) return end();
+    if (first == last) return last.inner_;
+    if (is_soo()) {
+      destroy(soo_slot());
+      common().set_empty_soo();
+      return end();
+    }
     if (first == begin() && last == end()) {
       // TODO(ezb): we access control bytes in destroy_slots so it could make
       // sense to combine destroy_slots and ClearBackingArray to avoid cache
       // misses when the table is large. Note that we also do this in clear().
       destroy_slots();
-      ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/true);
+      ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/true,
+                        SooEnabled());
       common().set_reserved_growth(common().reservation_size());
       return end();
     }
@@ -2568,13 +3168,21 @@
   template <typename H, typename E>
   void merge(raw_hash_set<Policy, H, E, Alloc>& src) {  // NOLINT
     assert(this != &src);
+    // Returns whether insertion took place.
+    const auto insert_slot = [this](slot_type* src_slot) {
+      return PolicyTraits::apply(InsertSlot<false>{*this, std::move(*src_slot)},
+                                 PolicyTraits::element(src_slot))
+          .second;
+    };
+
+    if (src.is_soo()) {
+      if (src.empty()) return;
+      if (insert_slot(src.soo_slot())) src.common().set_empty_soo();
+      return;
+    }
     for (auto it = src.begin(), e = src.end(); it != e;) {
       auto next = std::next(it);
-      if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot())},
-                              PolicyTraits::element(it.slot()))
-              .second) {
-        src.erase_meta_only(it);
-      }
+      if (insert_slot(it.slot())) src.erase_meta_only(it);
       it = next;
     }
   }
@@ -2588,7 +3196,11 @@
     AssertIsFull(position.control(), position.inner_.generation(),
                  position.inner_.generation_ptr(), "extract()");
     auto node = CommonAccess::Transfer<node_type>(alloc_ref(), position.slot());
-    erase_meta_only(position);
+    if (is_soo()) {
+      common().set_empty_soo();
+    } else {
+      erase_meta_only(position);
+    }
     return node;
   }
 
@@ -2605,7 +3217,7 @@
       IsNoThrowSwappable<allocator_type>(
           typename AllocTraits::propagate_on_container_swap{})) {
     using std::swap;
-    swap(common(), that.common());
+    swap_common(that);
     swap(hash_ref(), that.hash_ref());
     swap(eq_ref(), that.eq_ref());
     SwapAlloc(alloc_ref(), that.alloc_ref(),
@@ -2613,17 +3225,41 @@
   }
 
   void rehash(size_t n) {
-    if (n == 0 && capacity() == 0) return;
-    if (n == 0 && size() == 0) {
-      ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false);
-      return;
+    const size_t cap = capacity();
+    if (n == 0) {
+      if (cap == 0 || is_soo()) return;
+      if (empty()) {
+        ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false,
+                          SooEnabled());
+        return;
+      }
+      if (fits_in_soo(size())) {
+        // When the table is already sampled, we keep it sampled.
+        if (infoz().IsSampled()) {
+          const size_t kInitialSampledCapacity = NextCapacity(SooCapacity());
+          if (capacity() > kInitialSampledCapacity) {
+            resize(kInitialSampledCapacity);
+          }
+          // This asserts that we didn't lose sampling coverage in `resize`.
+          assert(infoz().IsSampled());
+          return;
+        }
+        alignas(slot_type) unsigned char slot_space[sizeof(slot_type)];
+        slot_type* tmp_slot = to_slot(slot_space);
+        transfer(tmp_slot, begin().slot());
+        ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false,
+                          SooEnabled());
+        transfer(soo_slot(), tmp_slot);
+        common().set_full_soo();
+        return;
+      }
     }
 
     // bitor is a faster way of doing `max` here. We will round up to the next
     // power-of-2-minus-1, so bitor is good enough.
     auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
     // n == 0 unconditionally rehashes as per the standard.
-    if (n == 0 || m > capacity()) {
+    if (n == 0 || m > cap) {
       resize(m);
 
       // This is after resize, to ensure that we have completed the allocation
@@ -2633,7 +3269,9 @@
   }
 
   void reserve(size_t n) {
-    if (n > size() + growth_left()) {
+    const size_t max_size_before_growth =
+        is_soo() ? SooCapacity() : size() + growth_left();
+    if (n > max_size_before_growth) {
       size_t m = GrowthToLowerboundCapacity(n);
       resize(NormalizeCapacity(m));
 
@@ -2666,6 +3304,7 @@
   // specific benchmarks indicating its importance.
   template <class K = key_type>
   void prefetch(const key_arg<K>& key) const {
+    if (SooEnabled() ? is_soo() : capacity() == 0) return;
     (void)key;
     // Avoid probing if we won't be able to prefetch the addresses received.
 #ifdef ABSL_HAVE_PREFETCH
@@ -2686,26 +3325,16 @@
   template <class K = key_type>
   iterator find(const key_arg<K>& key,
                 size_t hash) ABSL_ATTRIBUTE_LIFETIME_BOUND {
-    auto seq = probe(common(), hash);
-    slot_type* slot_ptr = slot_array();
-    const ctrl_t* ctrl = control();
-    while (true) {
-      Group g{ctrl + seq.offset()};
-      for (uint32_t i : g.Match(H2(hash))) {
-        if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
-                EqualElement<K>{key, eq_ref()},
-                PolicyTraits::element(slot_ptr + seq.offset(i)))))
-          return iterator_at(seq.offset(i));
-      }
-      if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end();
-      seq.next();
-      assert(seq.index() <= capacity() && "full table!");
-    }
+    AssertHashEqConsistent(key);
+    if (is_soo()) return find_soo(key);
+    return find_non_soo(key, hash);
   }
   template <class K = key_type>
   iterator find(const key_arg<K>& key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    AssertHashEqConsistent(key);
+    if (is_soo()) return find_soo(key);
     prefetch_heap_block();
-    return find(key, hash_ref()(key));
+    return find_non_soo(key, hash_ref()(key));
   }
 
   template <class K = key_type>
@@ -2716,8 +3345,7 @@
   template <class K = key_type>
   const_iterator find(const key_arg<K>& key) const
       ABSL_ATTRIBUTE_LIFETIME_BOUND {
-    prefetch_heap_block();
-    return find(key, hash_ref()(key));
+    return const_cast<raw_hash_set*>(this)->find(key);
   }
 
   template <class K = key_type>
@@ -2791,6 +3419,8 @@
   friend struct absl::container_internal::hashtable_debug_internal::
       HashtableDebugAccess;
 
+  friend struct absl::container_internal::HashtableFreeFunctionsAccess;
+
   struct FindElement {
     template <class K, class... Args>
     const_iterator operator()(const K& key, Args&&...) const {
@@ -2824,7 +3454,7 @@
       if (res.second) {
         s.emplace_at(res.first, std::forward<Args>(args)...);
       }
-      return {s.iterator_at(res.first), res.second};
+      return res;
     }
     raw_hash_set& s;
   };
@@ -2835,11 +3465,11 @@
     std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
       auto res = s.find_or_prepare_insert(key);
       if (res.second) {
-        s.transfer(s.slot_array() + res.first, &slot);
+        s.transfer(res.first.slot(), &slot);
       } else if (do_destroy) {
         s.destroy(&slot);
       }
-      return {s.iterator_at(res.first), res.second};
+      return res;
     }
     raw_hash_set& s;
     // Constructed slot. Either moved into place or destroyed.
@@ -2858,17 +3488,55 @@
     PolicyTraits::transfer(&alloc_ref(), to, from);
   }
 
-  inline void destroy_slots() {
-    const size_t cap = capacity();
+  // TODO(b/289225379): consider having a helper class that has the impls for
+  // SOO functionality.
+  template <class K = key_type>
+  iterator find_soo(const key_arg<K>& key) {
+    assert(is_soo());
+    return empty() || !PolicyTraits::apply(EqualElement<K>{key, eq_ref()},
+                                           PolicyTraits::element(soo_slot()))
+               ? end()
+               : soo_iterator();
+  }
+
+  template <class K = key_type>
+  iterator find_non_soo(const key_arg<K>& key, size_t hash) {
+    assert(!is_soo());
+    auto seq = probe(common(), hash);
     const ctrl_t* ctrl = control();
-    slot_type* slot = slot_array();
-    for (size_t i = 0; i != cap; ++i) {
-      if (IsFull(ctrl[i])) {
-        destroy(slot + i);
+    while (true) {
+      Group g{ctrl + seq.offset()};
+      for (uint32_t i : g.Match(H2(hash))) {
+        if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
+                EqualElement<K>{key, eq_ref()},
+                PolicyTraits::element(slot_array() + seq.offset(i)))))
+          return iterator_at(seq.offset(i));
       }
+      if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end();
+      seq.next();
+      assert(seq.index() <= capacity() && "full table!");
     }
   }
 
+  // Conditionally samples hashtablez for SOO tables. This should be called on
+  // insertion into an empty SOO table and in copy construction when the size
+  // can fit in SOO capacity.
+  inline HashtablezInfoHandle try_sample_soo() {
+    assert(is_soo());
+    if (!ShouldSampleHashtablezInfo<CharAlloc>()) return HashtablezInfoHandle{};
+    return Sample(sizeof(slot_type), sizeof(key_type), sizeof(value_type),
+                  SooCapacity());
+  }
+
+  inline void destroy_slots() {
+    assert(!is_soo());
+    if (PolicyTraits::template destroy_is_trivial<Alloc>()) return;
+    IterateOverFullSlots(
+        common(), slot_array(),
+        [&](const ctrl_t*, slot_type* slot)
+            ABSL_ATTRIBUTE_ALWAYS_INLINE { this->destroy(slot); });
+  }
+
   inline void dealloc() {
     assert(capacity() != 0);
     // Unpoison before returning the memory to the allocator.
@@ -2881,6 +3549,12 @@
 
   inline void destructor_impl() {
     if (capacity() == 0) return;
+    if (is_soo()) {
+      if (!empty()) {
+        ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(destroy(soo_slot()));
+      }
+      return;
+    }
     destroy_slots();
     dealloc();
   }
@@ -2890,10 +3564,16 @@
   // This merely updates the pertinent control byte. This can be used in
   // conjunction with Policy::transfer to move the object to another place.
   void erase_meta_only(const_iterator it) {
+    assert(!is_soo());
     EraseMetaOnly(common(), static_cast<size_t>(it.control() - control()),
                   sizeof(slot_type));
   }
 
+  size_t hash_of(slot_type* slot) const {
+    return PolicyTraits::apply(HashElement{hash_ref()},
+                               PolicyTraits::element(slot));
+  }
+
   // Resizes table to the new capacity and move all elements to the new
   // positions accordingly.
   //
@@ -2902,143 +3582,165 @@
   // HashSetResizeHelper::FindFirstNonFullAfterResize(
   //    common(), old_capacity, hash)
   // can be called right after `resize`.
-  ABSL_ATTRIBUTE_NOINLINE void resize(size_t new_capacity) {
+  void resize(size_t new_capacity) {
+    raw_hash_set::resize_impl(common(), new_capacity, HashtablezInfoHandle{});
+  }
+
+  // As above, except that we also accept a pre-sampled, forced infoz for
+  // SOO tables, since they need to switch from SOO to heap in order to
+  // store the infoz.
+  void resize_with_soo_infoz(HashtablezInfoHandle forced_infoz) {
+    assert(forced_infoz.IsSampled());
+    raw_hash_set::resize_impl(common(), NextCapacity(SooCapacity()),
+                              forced_infoz);
+  }
+
+  // Resizes the set to the new capacity.
+  // It is a static function in order to use its pointer in GetPolicyFunctions.
+  ABSL_ATTRIBUTE_NOINLINE static void resize_impl(
+      CommonFields& common, size_t new_capacity,
+      HashtablezInfoHandle forced_infoz) {
+    raw_hash_set* set = reinterpret_cast<raw_hash_set*>(&common);
     assert(IsValidCapacity(new_capacity));
-    HashSetResizeHelper resize_helper(common());
-    auto* old_slots = slot_array();
-    common().set_capacity(new_capacity);
+    assert(!set->fits_in_soo(new_capacity));
+    const bool was_soo = set->is_soo();
+    const bool had_soo_slot = was_soo && !set->empty();
+    const ctrl_t soo_slot_h2 =
+        had_soo_slot ? static_cast<ctrl_t>(H2(set->hash_of(set->soo_slot())))
+                     : ctrl_t::kEmpty;
+    HashSetResizeHelper resize_helper(common, was_soo, had_soo_slot,
+                                      forced_infoz);
+    // Initialize HashSetResizeHelper::old_heap_or_soo_. We can't do this in
+    // the HashSetResizeHelper constructor because it can't transfer slots when
+    // transfer_uses_memcpy is false.
+    // TODO(b/289225379): try to handle more of the SOO cases inside
+    // InitializeSlots. See comment on cl/555990034 snapshot #63.
+    if (PolicyTraits::transfer_uses_memcpy() || !had_soo_slot) {
+      resize_helper.old_heap_or_soo() = common.heap_or_soo();
+    } else {
+      set->transfer(set->to_slot(resize_helper.old_soo_data()),
+                    set->soo_slot());
+    }
+    common.set_capacity(new_capacity);
     // Note that `InitializeSlots` performs a different number of initialization steps
     // depending on the values of `transfer_uses_memcpy` and capacities.
     // Refer to the comment in `InitializeSlots` for more details.
     const bool grow_single_group =
         resize_helper.InitializeSlots<CharAlloc, sizeof(slot_type),
                                       PolicyTraits::transfer_uses_memcpy(),
-                                      alignof(slot_type)>(
-            common(), const_cast<std::remove_const_t<slot_type>*>(old_slots),
-            CharAlloc(alloc_ref()));
+                                      SooEnabled(), alignof(slot_type)>(
+            common, CharAlloc(set->alloc_ref()), soo_slot_h2, sizeof(key_type),
+            sizeof(value_type));
 
-    if (resize_helper.old_capacity() == 0) {
+    // In the SooEnabled() case, capacity is never 0 so we don't check.
+    if (!SooEnabled() && resize_helper.old_capacity() == 0) {
       // InitializeSlots did all the work including infoz().RecordRehash().
       return;
     }
+    assert(resize_helper.old_capacity() > 0);
+    // Nothing more to do in this case.
+    if (was_soo && !had_soo_slot) return;
 
+    slot_type* new_slots = set->slot_array();
     if (grow_single_group) {
       if (PolicyTraits::transfer_uses_memcpy()) {
         // InitializeSlots did all the work.
         return;
       }
-      // We want GrowSizeIntoSingleGroup to be called here in order to make
-      // InitializeSlots not depend on PolicyTraits.
-      resize_helper.GrowSizeIntoSingleGroup<PolicyTraits>(common(), alloc_ref(),
-                                                          old_slots);
+      if (was_soo) {
+        set->transfer(new_slots + resize_helper.SooSlotIndex(),
+                      to_slot(resize_helper.old_soo_data()));
+        return;
+      } else {
+        // We want GrowSizeIntoSingleGroup to be called here in order to make
+        // InitializeSlots not depend on PolicyTraits.
+        resize_helper.GrowSizeIntoSingleGroup<PolicyTraits>(common,
+                                                            set->alloc_ref());
+      }
     } else {
       // InitializeSlots prepares control bytes to correspond to empty table.
-      auto* new_slots = slot_array();
-      size_t total_probe_length = 0;
-      for (size_t i = 0; i != resize_helper.old_capacity(); ++i) {
-        if (IsFull(resize_helper.old_ctrl()[i])) {
-          size_t hash = PolicyTraits::apply(
-              HashElement{hash_ref()}, PolicyTraits::element(old_slots + i));
-          auto target = find_first_non_full(common(), hash);
-          size_t new_i = target.offset;
-          total_probe_length += target.probe_length;
-          SetCtrl(common(), new_i, H2(hash), sizeof(slot_type));
-          transfer(new_slots + new_i, old_slots + i);
+      const auto insert_slot = [&](slot_type* slot) {
+        size_t hash = PolicyTraits::apply(HashElement{set->hash_ref()},
+                                          PolicyTraits::element(slot));
+        auto target = find_first_non_full(common, hash);
+        SetCtrl(common, target.offset, H2(hash), sizeof(slot_type));
+        set->transfer(new_slots + target.offset, slot);
+        return target.probe_length;
+      };
+      if (was_soo) {
+        insert_slot(to_slot(resize_helper.old_soo_data()));
+        return;
+      } else {
+        auto* old_slots = static_cast<slot_type*>(resize_helper.old_slots());
+        size_t total_probe_length = 0;
+        for (size_t i = 0; i != resize_helper.old_capacity(); ++i) {
+          if (IsFull(resize_helper.old_ctrl()[i])) {
+            total_probe_length += insert_slot(old_slots + i);
+          }
         }
+        common.infoz().RecordRehash(total_probe_length);
       }
-      infoz().RecordRehash(total_probe_length);
     }
-    resize_helper.DeallocateOld<alignof(slot_type)>(
-        CharAlloc(alloc_ref()), sizeof(slot_type),
-        const_cast<std::remove_const_t<slot_type>*>(old_slots));
+    resize_helper.DeallocateOld<alignof(slot_type)>(CharAlloc(set->alloc_ref()),
+                                                    sizeof(slot_type));
   }
 
-  // Prunes control bytes to remove as many tombstones as possible.
-  //
-  // See the comment on `rehash_and_grow_if_necessary()`.
-  inline void drop_deletes_without_resize() {
-    // Stack-allocate space for swapping elements.
-    alignas(slot_type) unsigned char tmp[sizeof(slot_type)];
-    DropDeletesWithoutResize(common(), GetPolicyFunctions(), tmp);
-  }
+  // Casting directly from e.g. char* to slot_type* can cause compilation errors
+  // on Objective-C. This function converts to void* first, avoiding the issue.
+  static slot_type* to_slot(void* buf) { return static_cast<slot_type*>(buf); }
 
-  // Called whenever the table *might* need to conditionally grow.
-  //
-  // This function is an optimization opportunity to perform a rehash even when
-  // growth is unnecessary, because vacating tombstones is beneficial for
-  // performance in the long-run.
-  void rehash_and_grow_if_necessary() {
-    const size_t cap = capacity();
-    if (cap > Group::kWidth &&
-        // Do these calculations in 64-bit to avoid overflow.
-        size() * uint64_t{32} <= cap * uint64_t{25}) {
-      // Squash DELETED without growing if there is enough capacity.
-      //
-      // Rehash in place if the current size is <= 25/32 of capacity.
-      // Rationale for such a high factor: 1) drop_deletes_without_resize() is
-      // faster than resize, and 2) it takes quite a bit of work to add
-      // tombstones.  In the worst case, seems to take approximately 4
-      // insert/erase pairs to create a single tombstone and so if we are
-      // rehashing because of tombstones, we can afford to rehash-in-place as
-      // long as we are reclaiming at least 1/8 the capacity without doing more
-      // than 2X the work.  (Where "work" is defined to be size() for rehashing
-      // or rehashing in place, and 1 for an insert or erase.)  But rehashing in
-      // place is faster per operation than inserting or even doubling the size
-      // of the table, so we actually afford to reclaim even less space from a
-      // resize-in-place.  The decision is to rehash in place if we can reclaim
-      // at about 1/8th of the usable capacity (specifically 3/28 of the
-      // capacity) which means that the total cost of rehashing will be a small
-      // fraction of the total work.
-      //
-      // Here is output of an experiment using the BM_CacheInSteadyState
-      // benchmark running the old case (where we rehash-in-place only if we can
-      // reclaim at least 7/16*capacity) vs. this code (which rehashes in place
-      // if we can recover 3/32*capacity).
-      //
-      // Note that although in the worst-case number of rehashes jumped up from
-      // 15 to 190, but the number of operations per second is almost the same.
-      //
-      // Abridged output of running BM_CacheInSteadyState benchmark from
-      // raw_hash_set_benchmark.   N is the number of insert/erase operations.
-      //
-      //      | OLD (recover >= 7/16        | NEW (recover >= 3/32)
-      // size |    N/s LoadFactor NRehashes |    N/s LoadFactor NRehashes
-      //  448 | 145284       0.44        18 | 140118       0.44        19
-      //  493 | 152546       0.24        11 | 151417       0.48        28
-      //  538 | 151439       0.26        11 | 151152       0.53        38
-      //  583 | 151765       0.28        11 | 150572       0.57        50
-      //  628 | 150241       0.31        11 | 150853       0.61        66
-      //  672 | 149602       0.33        12 | 150110       0.66        90
-      //  717 | 149998       0.35        12 | 149531       0.70       129
-      //  762 | 149836       0.37        13 | 148559       0.74       190
-      //  807 | 149736       0.39        14 | 151107       0.39        14
-      //  852 | 150204       0.42        15 | 151019       0.42        15
-      drop_deletes_without_resize();
+  // Requires that lhs does not have a full SOO slot.
+  static void move_common(bool that_is_full_soo, allocator_type& rhs_alloc,
+                          CommonFields& lhs, CommonFields&& rhs) {
+    if (PolicyTraits::transfer_uses_memcpy() || !that_is_full_soo) {
+      lhs = std::move(rhs);
     } else {
-      // Otherwise grow the container.
-      resize(NextCapacity(cap));
+      lhs.move_non_heap_or_soo_fields(rhs);
+      // TODO(b/303305702): add reentrancy guard.
+      PolicyTraits::transfer(&rhs_alloc, to_slot(lhs.soo_data()),
+                             to_slot(rhs.soo_data()));
     }
   }
 
+  // Swaps common fields making sure to avoid memcpy'ing a full SOO slot if we
+  // aren't allowed to do so.
+  void swap_common(raw_hash_set& that) {
+    using std::swap;
+    if (PolicyTraits::transfer_uses_memcpy()) {
+      swap(common(), that.common());
+      return;
+    }
+    CommonFields tmp = CommonFields::CreateDefault<SooEnabled()>();
+    const bool that_is_full_soo = that.is_full_soo();
+    move_common(that_is_full_soo, that.alloc_ref(), tmp,
+                std::move(that.common()));
+    move_common(is_full_soo(), alloc_ref(), that.common(), std::move(common()));
+    move_common(that_is_full_soo, that.alloc_ref(), common(), std::move(tmp));
+  }
+
   void maybe_increment_generation_or_rehash_on_move() {
-    common().maybe_increment_generation_on_move();
+    if (!SwisstableGenerationsEnabled() || capacity() == 0 || is_soo()) {
+      return;
+    }
+    common().increment_generation();
     if (!empty() && common().should_rehash_for_bug_detection_on_move()) {
       resize(capacity());
     }
   }
 
-  template<bool propagate_alloc>
+  template <bool propagate_alloc>
   raw_hash_set& assign_impl(raw_hash_set&& that) {
     // We don't bother checking for this/that aliasing. We just need to avoid
     // breaking the invariants in that case.
     destructor_impl();
-    common() = std::move(that.common());
+    move_common(that.is_full_soo(), that.alloc_ref(), common(),
+                std::move(that.common()));
     // TODO(b/296061262): move instead of copying hash/eq/alloc.
     hash_ref() = that.hash_ref();
     eq_ref() = that.eq_ref();
     CopyAlloc(alloc_ref(), that.alloc_ref(),
               std::integral_constant<bool, propagate_alloc>());
-    that.common() = CommonFields{};
+    that.common() = CommonFields::CreateDefault<SooEnabled()>();
     maybe_increment_generation_or_rehash_on_move();
     return *this;
   }
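
assign_impl above is templated on propagate_alloc, mirroring how allocator-aware containers typically consult std::allocator_traits to decide whether the source allocator travels with the moved-from table. A minimal sketch of that decision using only standard traits; the helper name is made up for illustration and is not part of this patch:

#include <memory>

// Hypothetical helper: report whether move-assignment should also move the
// allocator, per std::allocator_traits. The equivalent answer is what a
// caller would pass as the propagate_alloc template argument above.
template <class Alloc>
constexpr bool PropagateOnMoveAssign() {
  return std::allocator_traits<
      Alloc>::propagate_on_container_move_assignment::value;
}

static_assert(PropagateOnMoveAssign<std::allocator<int>>(),
              "std::allocator propagates on move assignment");
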
@@ -3051,8 +3753,8 @@
       insert(std::move(PolicyTraits::element(it.slot())));
       that.destroy(it.slot());
     }
-    that.dealloc();
-    that.common() = CommonFields{};
+    if (!that.is_soo()) that.dealloc();
+    that.common() = CommonFields::CreateDefault<SooEnabled()>();
     maybe_increment_generation_or_rehash_on_move();
     return *this;
   }
@@ -3078,12 +3780,30 @@
     return move_elements_allocs_unequal(std::move(that));
   }
 
- protected:
-  // Attempts to find `key` in the table; if it isn't found, returns a slot that
-  // the value can be inserted into, with the control byte already set to
-  // `key`'s H2.
   template <class K>
-  std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
+  std::pair<iterator, bool> find_or_prepare_insert_soo(const K& key) {
+    if (empty()) {
+      const HashtablezInfoHandle infoz = try_sample_soo();
+      if (infoz.IsSampled()) {
+        resize_with_soo_infoz(infoz);
+      } else {
+        common().set_full_soo();
+        return {soo_iterator(), true};
+      }
+    } else if (PolicyTraits::apply(EqualElement<K>{key, eq_ref()},
+                                   PolicyTraits::element(soo_slot()))) {
+      return {soo_iterator(), false};
+    } else {
+      resize(NextCapacity(SooCapacity()));
+    }
+    const size_t index =
+        PrepareInsertAfterSoo(hash_ref()(key), sizeof(slot_type), common());
+    return {iterator_at(index), true};
+  }
+
+  template <class K>
+  std::pair<iterator, bool> find_or_prepare_insert_non_soo(const K& key) {
+    assert(!is_soo());
     prefetch_heap_block();
     auto hash = hash_ref()(key);
     auto seq = probe(common(), hash);
@@ -3094,65 +3814,92 @@
         if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
                 EqualElement<K>{key, eq_ref()},
                 PolicyTraits::element(slot_array() + seq.offset(i)))))
-          return {seq.offset(i), false};
+          return {iterator_at(seq.offset(i)), false};
       }
-      if (ABSL_PREDICT_TRUE(g.MaskEmpty())) break;
+      auto mask_empty = g.MaskEmpty();
+      if (ABSL_PREDICT_TRUE(mask_empty)) {
+        size_t target = seq.offset(
+            GetInsertionOffset(mask_empty, capacity(), hash, control()));
+        return {iterator_at(PrepareInsertNonSoo(common(), hash,
+                                                FindInfo{target, seq.index()},
+                                                GetPolicyFunctions())),
+                true};
+      }
       seq.next();
       assert(seq.index() <= capacity() && "full table!");
     }
-    return {prepare_insert(hash), true};
   }
 
-  // Given the hash of a value not currently in the table, finds the next
-  // viable slot index to insert it at.
-  //
-  // REQUIRES: At least one non-full slot available.
-  size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
-    const bool rehash_for_bug_detection =
-        common().should_rehash_for_bug_detection_on_insert();
-    if (rehash_for_bug_detection) {
-      // Move to a different heap allocation in order to detect bugs.
-      const size_t cap = capacity();
-      resize(growth_left() > 0 ? cap : NextCapacity(cap));
+ protected:
+  // Asserts that hash and equal functors provided by the user are consistent,
+  // meaning that `eq(k1, k2)` implies `hash(k1)==hash(k2)`.
+  template <class K>
+  void AssertHashEqConsistent(ABSL_ATTRIBUTE_UNUSED const K& key) {
+#ifndef NDEBUG
+    if (empty()) return;
+
+    const size_t hash_of_arg = hash_ref()(key);
+    const auto assert_consistent = [&](const ctrl_t*, slot_type* slot) {
+      const value_type& element = PolicyTraits::element(slot);
+      const bool is_key_equal =
+          PolicyTraits::apply(EqualElement<K>{key, eq_ref()}, element);
+      if (!is_key_equal) return;
+
+      const size_t hash_of_slot =
+          PolicyTraits::apply(HashElement{hash_ref()}, element);
+      const bool is_hash_equal = hash_of_arg == hash_of_slot;
+      if (!is_hash_equal) {
+        // In this case, we're going to crash. Do a couple of other checks for
+        // idempotence issues. Recalculating hash/eq here is also convenient for
+        // debugging with gdb/lldb.
+        const size_t once_more_hash_arg = hash_ref()(key);
+        assert(hash_of_arg == once_more_hash_arg && "hash is not idempotent.");
+        const size_t once_more_hash_slot =
+            PolicyTraits::apply(HashElement{hash_ref()}, element);
+        assert(hash_of_slot == once_more_hash_slot &&
+               "hash is not idempotent.");
+        const bool once_more_eq =
+            PolicyTraits::apply(EqualElement<K>{key, eq_ref()}, element);
+        assert(is_key_equal == once_more_eq && "equality is not idempotent.");
+      }
+      assert((!is_key_equal || is_hash_equal) &&
+             "eq(k1, k2) must imply that hash(k1) == hash(k2). "
+             "hash/eq functors are inconsistent.");
+    };
+
+    if (is_soo()) {
+      assert_consistent(/*unused*/ nullptr, soo_slot());
+      return;
     }
-    auto target = find_first_non_full(common(), hash);
-    if (!rehash_for_bug_detection &&
-        ABSL_PREDICT_FALSE(growth_left() == 0 &&
-                           !IsDeleted(control()[target.offset]))) {
-      size_t old_capacity = capacity();
-      rehash_and_grow_if_necessary();
-      // NOTE: It is safe to use `FindFirstNonFullAfterResize`.
-      // `FindFirstNonFullAfterResize` must be called right after resize.
-      // `rehash_and_grow_if_necessary` may *not* call `resize`
-      // and perform `drop_deletes_without_resize` instead. But this
-      // could happen only on big tables.
-      // For big tables `FindFirstNonFullAfterResize` will always
-      // fallback to normal `find_first_non_full`, so it is safe to use it.
-      target = HashSetResizeHelper::FindFirstNonFullAfterResize(
-          common(), old_capacity, hash);
-    }
-    common().increment_size();
-    set_growth_left(growth_left() - IsEmpty(control()[target.offset]));
-    SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type));
-    common().maybe_increment_generation_on_insert();
-    infoz().RecordInsert(hash, target.probe_length);
-    return target.offset;
+    // We only do validation for small tables so that it's constant time.
+    if (capacity() > 16) return;
+    IterateOverFullSlots(common(), slot_array(), assert_consistent);
+#endif
+  }
+
+  // Attempts to find `key` in the table; if it isn't found, returns an iterator
+  // to where the value can be inserted, with the control byte already set to
+  // `key`'s H2. Also returns a bool indicating whether an insertion can occur.
+  template <class K>
+  std::pair<iterator, bool> find_or_prepare_insert(const K& key) {
+    AssertHashEqConsistent(key);
+    if (is_soo()) return find_or_prepare_insert_soo(key);
+    return find_or_prepare_insert_non_soo(key);
   }
 
   // Constructs the value in the space pointed by the iterator. This only works
   // after an unsuccessful find_or_prepare_insert() and before any other
   // modifications happen in the raw_hash_set.
   //
-  // PRECONDITION: i is an index returned from find_or_prepare_insert(k), where
-  // k is the key decomposed from `forward<Args>(args)...`, and the bool
-  // returned by find_or_prepare_insert(k) was true.
+  // PRECONDITION: iter was returned from find_or_prepare_insert(k), where k is
+  // the key decomposed from `forward<Args>(args)...`, and the bool returned by
+  // find_or_prepare_insert(k) was true.
   // POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
   template <class... Args>
-  void emplace_at(size_t i, Args&&... args) {
-    construct(slot_array() + i, std::forward<Args>(args)...);
+  void emplace_at(iterator iter, Args&&... args) {
+    construct(iter.slot(), std::forward<Args>(args)...);
 
-    assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) ==
-               iterator_at(i) &&
+    assert(PolicyTraits::apply(FindElement{*this}, *iter) == iter &&
            "constructed value does not match the lookup key");
   }
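
find_or_prepare_insert plus emplace_at is the internal two-phase insert that public hooks such as lazy_emplace build on: look the key up once, and construct the element only if the key was absent. A usage sketch against the public flat_hash_set surface, under the documented lazy_emplace contract (the LazyEmplace test later in this diff exercises the same API on StringTable):

#include <iostream>
#include <string>
#include "absl/container/flat_hash_set.h"

int main() {
  absl::flat_hash_set<std::string> ids;
  // The callback runs only when "alpha" is not already present; it must
  // construct the value through the provided constructor object.
  ids.lazy_emplace("alpha", [](const auto& ctor) { ctor("alpha"); });
  ids.lazy_emplace("alpha", [](const auto& ctor) { ctor("alpha"); });  // no-op
  std::cout << ids.size() << "\n";  // prints 1
  return 0;
}
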
 
@@ -3160,7 +3907,7 @@
     return {control() + i, slot_array() + i, common().generation_ptr()};
   }
   const_iterator iterator_at(size_t i) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
-    return {control() + i, slot_array() + i, common().generation_ptr()};
+    return const_cast<raw_hash_set*>(this)->iterator_at(i);
   }
 
   reference unchecked_deref(iterator it) { return it.unchecked_deref(); }
@@ -3178,13 +3925,25 @@
   // side-effect.
   //
   // See `CapacityToGrowth()`.
-  size_t growth_left() const { return common().growth_left(); }
-  void set_growth_left(size_t gl) { return common().set_growth_left(gl); }
+  size_t growth_left() const {
+    assert(!is_soo());
+    return common().growth_left();
+  }
+
+  GrowthInfo& growth_info() {
+    assert(!is_soo());
+    return common().growth_info();
+  }
+  GrowthInfo growth_info() const {
+    assert(!is_soo());
+    return common().growth_info();
+  }
 
   // Prefetch the heap-allocated memory region to resolve potential TLB and
   // cache misses. This is intended to overlap with execution of calculating the
   // hash for a key.
   void prefetch_heap_block() const {
+    assert(!is_soo());
 #if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
     __builtin_prefetch(control(), 0, 1);
 #endif
@@ -3193,11 +3952,31 @@
   CommonFields& common() { return settings_.template get<0>(); }
   const CommonFields& common() const { return settings_.template get<0>(); }
 
-  ctrl_t* control() const { return common().control(); }
+  ctrl_t* control() const {
+    assert(!is_soo());
+    return common().control();
+  }
   slot_type* slot_array() const {
+    assert(!is_soo());
     return static_cast<slot_type*>(common().slot_array());
   }
-  HashtablezInfoHandle infoz() { return common().infoz(); }
+  slot_type* soo_slot() {
+    assert(is_soo());
+    return static_cast<slot_type*>(common().soo_data());
+  }
+  const slot_type* soo_slot() const {
+    return const_cast<raw_hash_set*>(this)->soo_slot();
+  }
+  iterator soo_iterator() {
+    return {SooControl(), soo_slot(), common().generation_ptr()};
+  }
+  const_iterator soo_iterator() const {
+    return const_cast<raw_hash_set*>(this)->soo_iterator();
+  }
+  HashtablezInfoHandle infoz() {
+    assert(!is_soo());
+    return common().infoz();
+  }
 
   hasher& hash_ref() { return settings_.template get<1>(); }
   const hasher& hash_ref() const { return settings_.template get<1>(); }
@@ -3208,12 +3987,9 @@
     return settings_.template get<3>();
   }
 
-  // Make type-specific functions for this type's PolicyFunctions struct.
-  static size_t hash_slot_fn(void* set, void* slot) {
-    auto* h = static_cast<raw_hash_set*>(set);
-    return PolicyTraits::apply(
-        HashElement{h->hash_ref()},
-        PolicyTraits::element(static_cast<slot_type*>(slot)));
+  static const void* get_hash_ref_fn(const CommonFields& common) {
+    auto* h = reinterpret_cast<const raw_hash_set*>(&common);
+    return &h->hash_ref();
   }
   static void transfer_slot_fn(void* set, void* dst, void* src) {
     auto* h = static_cast<raw_hash_set*>(set);
@@ -3236,13 +4012,18 @@
   static const PolicyFunctions& GetPolicyFunctions() {
     static constexpr PolicyFunctions value = {
         sizeof(slot_type),
-        &raw_hash_set::hash_slot_fn,
+        // TODO(b/328722020): try to type erase
+        // for standard layout and alignof(Hash) <= alignof(CommonFields).
+        std::is_empty<hasher>::value ? &GetHashRefForEmptyHasher
+                                     : &raw_hash_set::get_hash_ref_fn,
+        PolicyTraits::template get_hash_slot_fn<hasher>(),
         PolicyTraits::transfer_uses_memcpy()
             ? TransferRelocatable<sizeof(slot_type)>
             : &raw_hash_set::transfer_slot_fn,
         (std::is_same<SlotAlloc, std::allocator<slot_type>>::value
              ? &DeallocateStandard<alignof(slot_type)>
              : &raw_hash_set::dealloc_fn),
+        &raw_hash_set::resize_impl,
     };
     return value;
   }
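
GetPolicyFunctions packs per-instantiation operations into one static table of type-erased function pointers so that shared, non-templated code (resize_impl, erase helpers) can drive any slot type. A stripped-down sketch of the same pattern with hypothetical names and only two entries, not the real PolicyFunctions layout:

#include <cstddef>
#include <cstring>

// Hypothetical miniature of the idea: one static table per element type,
// holding type-erased operations that out-of-line code can call.
struct MiniPolicyFunctions {
  size_t slot_size;
  void (*transfer)(void* dst, void* src);
};

template <class T>
void MemcpyTransfer(void* dst, void* src) {
  // Only valid for trivially copyable slots; a non-trivial policy would
  // install a different function here.
  std::memcpy(dst, src, sizeof(T));
}

template <class T>
const MiniPolicyFunctions& GetMiniPolicyFunctions() {
  static constexpr MiniPolicyFunctions kValue = {sizeof(T), &MemcpyTransfer<T>};
  return kValue;
}
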
@@ -3252,22 +4033,78 @@
   // fields that occur after CommonFields.
   absl::container_internal::CompressedTuple<CommonFields, hasher, key_equal,
                                             allocator_type>
-      settings_{CommonFields{}, hasher{}, key_equal{}, allocator_type{}};
+      settings_{CommonFields::CreateDefault<SooEnabled()>(), hasher{},
+                key_equal{}, allocator_type{}};
+};
+
+// Friend access for free functions in raw_hash_set.h.
+struct HashtableFreeFunctionsAccess {
+  template <class Predicate, typename Set>
+  static typename Set::size_type EraseIf(Predicate& pred, Set* c) {
+    if (c->empty()) {
+      return 0;
+    }
+    if (c->is_soo()) {
+      auto it = c->soo_iterator();
+      if (!pred(*it)) {
+        assert(c->size() == 1 && "hash table was modified unexpectedly");
+        return 0;
+      }
+      c->destroy(it.slot());
+      c->common().set_empty_soo();
+      return 1;
+    }
+    ABSL_ATTRIBUTE_UNUSED const size_t original_size_for_assert = c->size();
+    size_t num_deleted = 0;
+    IterateOverFullSlots(
+        c->common(), c->slot_array(), [&](const ctrl_t* ctrl, auto* slot) {
+          if (pred(Set::PolicyTraits::element(slot))) {
+            c->destroy(slot);
+            EraseMetaOnly(c->common(), static_cast<size_t>(ctrl - c->control()),
+                          sizeof(*slot));
+            ++num_deleted;
+          }
+        });
+    // NOTE: IterateOverFullSlots allows removal of the current element, so we
+    // additionally verify the size here.
+    assert(original_size_for_assert - num_deleted == c->size() &&
+           "hash table was modified unexpectedly");
+    return num_deleted;
+  }
+
+  template <class Callback, typename Set>
+  static void ForEach(Callback& cb, Set* c) {
+    if (c->empty()) {
+      return;
+    }
+    if (c->is_soo()) {
+      cb(*c->soo_iterator());
+      return;
+    }
+    using ElementTypeWithConstness = decltype(*c->begin());
+    IterateOverFullSlots(
+        c->common(), c->slot_array(), [&cb](const ctrl_t*, auto* slot) {
+          ElementTypeWithConstness& element = Set::PolicyTraits::element(slot);
+          cb(element);
+        });
+  }
 };
 
 // Erases all elements that satisfy the predicate `pred` from the container `c`.
 template <typename P, typename H, typename E, typename A, typename Predicate>
 typename raw_hash_set<P, H, E, A>::size_type EraseIf(
     Predicate& pred, raw_hash_set<P, H, E, A>* c) {
-  const auto initial_size = c->size();
-  for (auto it = c->begin(), last = c->end(); it != last;) {
-    if (pred(*it)) {
-      c->erase(it++);
-    } else {
-      ++it;
-    }
-  }
-  return initial_size - c->size();
+  return HashtableFreeFunctionsAccess::EraseIf(pred, c);
+}
+
+// Calls `cb` for all elements in the container `c`.
+template <typename P, typename H, typename E, typename A, typename Callback>
+void ForEach(Callback& cb, raw_hash_set<P, H, E, A>* c) {
+  return HashtableFreeFunctionsAccess::ForEach(cb, c);
+}
+template <typename P, typename H, typename E, typename A, typename Callback>
+void ForEach(Callback& cb, const raw_hash_set<P, H, E, A>* c) {
+  return HashtableFreeFunctionsAccess::ForEach(cb, c);
 }
 
 namespace hashtable_debug_internal {
@@ -3278,6 +4115,7 @@
 
   static size_t GetNumProbes(const Set& set,
                              const typename Set::key_type& key) {
+    if (set.is_soo()) return 0;
     size_t num_probes = 0;
     size_t hash = set.hash_ref()(key);
     auto seq = probe(set.common(), hash);
@@ -3301,7 +4139,8 @@
   static size_t AllocatedByteSize(const Set& c) {
     size_t capacity = c.capacity();
     if (capacity == 0) return 0;
-    size_t m = c.common().alloc_size(sizeof(Slot), alignof(Slot));
+    size_t m =
+        c.is_soo() ? 0 : c.common().alloc_size(sizeof(Slot), alignof(Slot));
 
     size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
     if (per_slot != ~size_t{}) {
@@ -3321,5 +4160,7 @@
 }  // namespace absl
 
 #undef ABSL_SWISSTABLE_ENABLE_GENERATIONS
+#undef ABSL_SWISSTABLE_IGNORE_UNINITIALIZED
+#undef ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN
 
 #endif  // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
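
Most of the raw_hash_set.h changes above introduce small-object optimization (SOO): when a single slot fits inline, the table keeps it in CommonFields (soo_data/soo_slot) and only switches to heap-backed control and slot arrays on growth. The toy container below illustrates just the inline-or-heap storage idea under that reading; it is not Abseil's layout and has none of the control-byte machinery:

#include <cstddef>
#include <new>
#include <utility>
#include <vector>

// Toy sketch of "store one element inline, spill to the heap on growth".
// All names are hypothetical.
template <class T>
class TinyInlineSet {
 public:
  TinyInlineSet() = default;
  TinyInlineSet(const TinyInlineSet&) = delete;
  TinyInlineSet& operator=(const TinyInlineSet&) = delete;
  ~TinyInlineSet() {
    if (uses_inline()) inline_slot()->~T();
  }

  bool insert(const T& v) {
    if (contains(v)) return false;
    if (size_ == 0) {  // Empty table: construct into the inline (SOO) slot.
      ::new (static_cast<void*>(storage_)) T(v);
      size_ = 1;
      return true;
    }
    if (uses_inline()) {  // Growing past one element: move the inline slot out.
      heap_.push_back(std::move(*inline_slot()));
      inline_slot()->~T();
    }
    heap_.push_back(v);
    ++size_;
    return true;
  }

  bool contains(const T& v) const {
    if (uses_inline()) return *inline_slot() == v;
    for (const T& x : heap_) {
      if (x == v) return true;
    }
    return false;
  }

  std::size_t size() const { return size_; }

 private:
  bool uses_inline() const { return size_ == 1 && heap_.empty(); }
  T* inline_slot() { return reinterpret_cast<T*>(storage_); }
  const T* inline_slot() const { return reinterpret_cast<const T*>(storage_); }

  alignas(T) unsigned char storage_[sizeof(T)];
  std::vector<T> heap_;
  std::size_t size_ = 0;
};
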
diff --git a/absl/container/internal/raw_hash_set_allocator_test.cc b/absl/container/internal/raw_hash_set_allocator_test.cc
index 05dcfaa..7e7a506 100644
--- a/absl/container/internal/raw_hash_set_allocator_test.cc
+++ b/absl/container/internal/raw_hash_set_allocator_test.cc
@@ -25,6 +25,7 @@
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
 #include "absl/base/config.h"
+#include "absl/container/internal/container_memory.h"
 #include "absl/container/internal/raw_hash_set.h"
 #include "absl/container/internal/tracked.h"
 
@@ -133,7 +134,7 @@
 };
 
 struct Identity {
-  int32_t operator()(int32_t v) const { return v; }
+  size_t operator()(int32_t v) const { return static_cast<size_t>(v); }
 };
 
 struct Policy {
@@ -178,6 +179,11 @@
   }
 
   static slot_type& element(slot_type* slot) { return *slot; }
+
+  template <class Hash>
+  static constexpr HashSlotFn get_hash_slot_fn() {
+    return nullptr;
+  }
 };
 
 template <int Spec>
diff --git a/absl/container/internal/raw_hash_set_benchmark.cc b/absl/container/internal/raw_hash_set_benchmark.cc
index 88b0737..424b72c 100644
--- a/absl/container/internal/raw_hash_set_benchmark.cc
+++ b/absl/container/internal/raw_hash_set_benchmark.cc
@@ -12,19 +12,24 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+#include <algorithm>
 #include <array>
 #include <cmath>
 #include <cstddef>
 #include <cstdint>
+#include <limits>
 #include <numeric>
 #include <random>
+#include <string>
 #include <tuple>
 #include <utility>
 #include <vector>
 
 #include "absl/base/internal/raw_logging.h"
+#include "absl/container/internal/container_memory.h"
 #include "absl/container/internal/hash_function_defaults.h"
 #include "absl/container/internal/raw_hash_set.h"
+#include "absl/random/random.h"
 #include "absl/strings/str_format.h"
 #include "benchmark/benchmark.h"
 
@@ -58,6 +63,11 @@
   static auto apply(F&& f, int64_t x) -> decltype(std::forward<F>(f)(x, x)) {
     return std::forward<F>(f)(x, x);
   }
+
+  template <class Hash>
+  static constexpr HashSlotFn get_hash_slot_fn() {
+    return nullptr;
+  }
 };
 
 class StringPolicy {
@@ -116,6 +126,11 @@
     return apply_impl(std::forward<F>(f),
                       PairArgs(std::forward<Args>(args)...));
   }
+
+  template <class Hash>
+  static constexpr HashSlotFn get_hash_slot_fn() {
+    return nullptr;
+  }
 };
 
 struct StringHash : container_internal::hash_default_hash<absl::string_view> {
@@ -294,7 +309,7 @@
     benchmark::DoNotOptimize(t2);
   }
 }
-BENCHMARK(BM_CopyCtorSparseInt)->Range(128, 4096);
+BENCHMARK(BM_CopyCtorSparseInt)->Range(1, 4096);
 
 void BM_CopyCtorInt(benchmark::State& state) {
   std::random_device rd;
@@ -312,7 +327,7 @@
     benchmark::DoNotOptimize(t2);
   }
 }
-BENCHMARK(BM_CopyCtorInt)->Range(128, 4096);
+BENCHMARK(BM_CopyCtorInt)->Range(0, 4096);
 
 void BM_CopyCtorString(benchmark::State& state) {
   std::random_device rd;
@@ -330,7 +345,7 @@
     benchmark::DoNotOptimize(t2);
   }
 }
-BENCHMARK(BM_CopyCtorString)->Range(128, 4096);
+BENCHMARK(BM_CopyCtorString)->Range(0, 4096);
 
 void BM_CopyAssign(benchmark::State& state) {
   std::random_device rd;
@@ -445,6 +460,19 @@
 }
 BENCHMARK(BM_Group_Match);
 
+void BM_GroupPortable_Match(benchmark::State& state) {
+  std::array<ctrl_t, GroupPortableImpl::kWidth> group;
+  Iota(group.begin(), group.end(), -4);
+  GroupPortableImpl g{group.data()};
+  h2_t h = 1;
+  for (auto _ : state) {
+    ::benchmark::DoNotOptimize(h);
+    ::benchmark::DoNotOptimize(g);
+    ::benchmark::DoNotOptimize(g.Match(h));
+  }
+}
+BENCHMARK(BM_GroupPortable_Match);
+
 void BM_Group_MaskEmpty(benchmark::State& state) {
   std::array<ctrl_t, Group::kWidth> group;
   Iota(group.begin(), group.end(), -4);
@@ -467,6 +495,17 @@
 }
 BENCHMARK(BM_Group_MaskEmptyOrDeleted);
 
+void BM_Group_MaskNonFull(benchmark::State& state) {
+  std::array<ctrl_t, Group::kWidth> group;
+  Iota(group.begin(), group.end(), -4);
+  Group g{group.data()};
+  for (auto _ : state) {
+    ::benchmark::DoNotOptimize(g);
+    ::benchmark::DoNotOptimize(g.MaskNonFull());
+  }
+}
+BENCHMARK(BM_Group_MaskNonFull);
+
 void BM_Group_CountLeadingEmptyOrDeleted(benchmark::State& state) {
   std::array<ctrl_t, Group::kWidth> group;
   Iota(group.begin(), group.end(), -2);
@@ -489,6 +528,17 @@
 }
 BENCHMARK(BM_Group_MatchFirstEmptyOrDeleted);
 
+void BM_Group_MatchFirstNonFull(benchmark::State& state) {
+  std::array<ctrl_t, Group::kWidth> group;
+  Iota(group.begin(), group.end(), -2);
+  Group g{group.data()};
+  for (auto _ : state) {
+    ::benchmark::DoNotOptimize(g);
+    ::benchmark::DoNotOptimize(g.MaskNonFull().LowestBitSet());
+  }
+}
+BENCHMARK(BM_Group_MatchFirstNonFull);
+
 void BM_DropDeletes(benchmark::State& state) {
   constexpr size_t capacity = (1 << 20) - 1;
   std::vector<ctrl_t> ctrl(capacity + 1 + Group::kWidth);
@@ -528,6 +578,67 @@
 }
 BENCHMARK(BM_Resize);
 
+void BM_EraseIf(benchmark::State& state) {
+  int64_t num_elements = state.range(0);
+  size_t num_erased = static_cast<size_t>(state.range(1));
+
+  constexpr size_t kRepetitions = 64;
+
+  absl::BitGen rng;
+
+  std::vector<std::vector<int64_t>> keys(kRepetitions);
+  std::vector<IntTable> tables;
+  std::vector<int64_t> threshold;
+  for (auto& k : keys) {
+    tables.push_back(IntTable());
+    auto& table = tables.back();
+    for (int64_t i = 0; i < num_elements; i++) {
+      // We use random keys to reduce noise.
+      k.push_back(
+          absl::Uniform<int64_t>(rng, 0, std::numeric_limits<int64_t>::max()));
+      if (!table.insert(k.back()).second) {
+        k.pop_back();
+        --i;  // duplicated value, retrying
+      }
+    }
+    std::sort(k.begin(), k.end());
+    threshold.push_back(static_cast<int64_t>(num_erased) < num_elements
+                            ? k[num_erased]
+                            : std::numeric_limits<int64_t>::max());
+  }
+
+  while (state.KeepRunningBatch(static_cast<int64_t>(kRepetitions) *
+                                std::max(num_elements, int64_t{1}))) {
+    benchmark::DoNotOptimize(tables);
+    for (size_t t_id = 0; t_id < kRepetitions; t_id++) {
+      auto& table = tables[t_id];
+      benchmark::DoNotOptimize(num_erased);
+      auto pred = [t = threshold[t_id]](int64_t key) { return key < t; };
+      benchmark::DoNotOptimize(pred);
+      benchmark::DoNotOptimize(table);
+      absl::container_internal::EraseIf(pred, &table);
+    }
+    state.PauseTiming();
+    for (size_t t_id = 0; t_id < kRepetitions; t_id++) {
+      auto& k = keys[t_id];
+      auto& table = tables[t_id];
+      for (size_t i = 0; i < num_erased; i++) {
+        table.insert(k[i]);
+      }
+    }
+    state.ResumeTiming();
+  }
+}
+
+BENCHMARK(BM_EraseIf)
+    ->ArgNames({"num_elements", "num_erased"})
+    ->ArgPair(10, 0)
+    ->ArgPair(1000, 0)
+    ->ArgPair(10, 5)
+    ->ArgPair(1000, 500)
+    ->ArgPair(10, 10)
+    ->ArgPair(1000, 1000);
+
 }  // namespace
 }  // namespace container_internal
 ABSL_NAMESPACE_END
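
BM_EraseIf above drives container_internal::EraseIf directly; user code reaches the same bulk-erase path through the public absl::erase_if overloads shipped with each container header. A small usage sketch (in recent Abseil releases erase_if returns the number of erased elements):

#include <cstdint>
#include <iostream>
#include "absl/container/flat_hash_set.h"

int main() {
  absl::flat_hash_set<int64_t> table = {1, 2, 3, 4, 5, 6};
  // Erase every even key in one pass; this is the public counterpart of the
  // container_internal::EraseIf call benchmarked above.
  auto erased = absl::erase_if(table, [](int64_t v) { return v % 2 == 0; });
  std::cout << "erased=" << erased << " left=" << table.size() << "\n";
  return 0;
}
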
diff --git a/absl/container/internal/raw_hash_set_probe_benchmark.cc b/absl/container/internal/raw_hash_set_probe_benchmark.cc
index 5d4184b..8f36305 100644
--- a/absl/container/internal/raw_hash_set_probe_benchmark.cc
+++ b/absl/container/internal/raw_hash_set_probe_benchmark.cc
@@ -70,6 +70,11 @@
       -> decltype(std::forward<F>(f)(arg, arg)) {
     return std::forward<F>(f)(arg, arg);
   }
+
+  template <class Hash>
+  static constexpr auto get_hash_slot_fn() {
+    return nullptr;
+  }
 };
 
 absl::BitGen& GlobalBitGen() {
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index f9797f5..f1257d4 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -15,6 +15,7 @@
 #include "absl/container/internal/raw_hash_set.h"
 
 #include <algorithm>
+#include <array>
 #include <atomic>
 #include <cmath>
 #include <cstddef>
@@ -51,10 +52,15 @@
 #include "absl/container/internal/hashtable_debug.h"
 #include "absl/container/internal/hashtablez_sampler.h"
 #include "absl/container/internal/test_allocator.h"
+#include "absl/container/internal/test_instance_tracker.h"
+#include "absl/container/node_hash_set.h"
+#include "absl/functional/function_ref.h"
 #include "absl/hash/hash.h"
+#include "absl/log/check.h"
 #include "absl/log/log.h"
 #include "absl/memory/memory.h"
 #include "absl/meta/type_traits.h"
+#include "absl/strings/str_cat.h"
 #include "absl/strings/string_view.h"
 
 namespace absl {
@@ -63,6 +69,10 @@
 
 struct RawHashSetTestOnlyAccess {
   template <typename C>
+  static auto GetCommon(const C& c) -> decltype(c.common()) {
+    return c.common();
+  }
+  template <typename C>
   static auto GetSlots(const C& c) -> decltype(c.slot_array()) {
     return c.slot_array();
   }
@@ -75,6 +85,7 @@
 namespace {
 
 using ::testing::ElementsAre;
+using ::testing::ElementsAreArray;
 using ::testing::Eq;
 using ::testing::Ge;
 using ::testing::Lt;
@@ -84,6 +95,94 @@
 // Convenience function to static cast to ctrl_t.
 ctrl_t CtrlT(int i) { return static_cast<ctrl_t>(i); }
 
+TEST(GrowthInfoTest, GetGrowthLeft) {
+  GrowthInfo gi;
+  gi.InitGrowthLeftNoDeleted(5);
+  EXPECT_EQ(gi.GetGrowthLeft(), 5);
+  gi.OverwriteFullAsDeleted();
+  EXPECT_EQ(gi.GetGrowthLeft(), 5);
+}
+
+TEST(GrowthInfoTest, HasNoDeleted) {
+  GrowthInfo gi;
+  gi.InitGrowthLeftNoDeleted(5);
+  EXPECT_TRUE(gi.HasNoDeleted());
+  gi.OverwriteFullAsDeleted();
+  EXPECT_FALSE(gi.HasNoDeleted());
+  // After reinitialization we have no deleted slots.
+  gi.InitGrowthLeftNoDeleted(5);
+  EXPECT_TRUE(gi.HasNoDeleted());
+}
+
+TEST(GrowthInfoTest, HasNoDeletedAndGrowthLeft) {
+  GrowthInfo gi;
+  gi.InitGrowthLeftNoDeleted(5);
+  EXPECT_TRUE(gi.HasNoDeletedAndGrowthLeft());
+  gi.OverwriteFullAsDeleted();
+  EXPECT_FALSE(gi.HasNoDeletedAndGrowthLeft());
+  gi.InitGrowthLeftNoDeleted(0);
+  EXPECT_FALSE(gi.HasNoDeletedAndGrowthLeft());
+  gi.OverwriteFullAsDeleted();
+  EXPECT_FALSE(gi.HasNoDeletedAndGrowthLeft());
+  // After reinitialization we have no deleted slots.
+  gi.InitGrowthLeftNoDeleted(5);
+  EXPECT_TRUE(gi.HasNoDeletedAndGrowthLeft());
+}
+
+TEST(GrowthInfoTest, HasNoGrowthLeftAndNoDeleted) {
+  GrowthInfo gi;
+  gi.InitGrowthLeftNoDeleted(1);
+  EXPECT_FALSE(gi.HasNoGrowthLeftAndNoDeleted());
+  gi.OverwriteEmptyAsFull();
+  EXPECT_TRUE(gi.HasNoGrowthLeftAndNoDeleted());
+  gi.OverwriteFullAsDeleted();
+  EXPECT_FALSE(gi.HasNoGrowthLeftAndNoDeleted());
+  gi.OverwriteFullAsEmpty();
+  EXPECT_FALSE(gi.HasNoGrowthLeftAndNoDeleted());
+  gi.InitGrowthLeftNoDeleted(0);
+  EXPECT_TRUE(gi.HasNoGrowthLeftAndNoDeleted());
+  gi.OverwriteFullAsEmpty();
+  EXPECT_FALSE(gi.HasNoGrowthLeftAndNoDeleted());
+}
+
+TEST(GrowthInfoTest, OverwriteFullAsEmpty) {
+  GrowthInfo gi;
+  gi.InitGrowthLeftNoDeleted(5);
+  gi.OverwriteFullAsEmpty();
+  EXPECT_EQ(gi.GetGrowthLeft(), 6);
+  gi.OverwriteFullAsDeleted();
+  EXPECT_EQ(gi.GetGrowthLeft(), 6);
+  gi.OverwriteFullAsEmpty();
+  EXPECT_EQ(gi.GetGrowthLeft(), 7);
+  EXPECT_FALSE(gi.HasNoDeleted());
+}
+
+TEST(GrowthInfoTest, OverwriteEmptyAsFull) {
+  GrowthInfo gi;
+  gi.InitGrowthLeftNoDeleted(5);
+  gi.OverwriteEmptyAsFull();
+  EXPECT_EQ(gi.GetGrowthLeft(), 4);
+  gi.OverwriteFullAsDeleted();
+  EXPECT_EQ(gi.GetGrowthLeft(), 4);
+  gi.OverwriteEmptyAsFull();
+  EXPECT_EQ(gi.GetGrowthLeft(), 3);
+  EXPECT_FALSE(gi.HasNoDeleted());
+}
+
+TEST(GrowthInfoTest, OverwriteControlAsFull) {
+  GrowthInfo gi;
+  gi.InitGrowthLeftNoDeleted(5);
+  gi.OverwriteControlAsFull(ctrl_t::kEmpty);
+  EXPECT_EQ(gi.GetGrowthLeft(), 4);
+  gi.OverwriteControlAsFull(ctrl_t::kDeleted);
+  EXPECT_EQ(gi.GetGrowthLeft(), 4);
+  gi.OverwriteFullAsDeleted();
+  gi.OverwriteControlAsFull(ctrl_t::kDeleted);
+  // We do not count the number of deleted slots, so the bit sticks until the next rehash.
+  EXPECT_FALSE(gi.HasNoDeletedAndGrowthLeft());
+  EXPECT_FALSE(gi.HasNoDeleted());
+}
+
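
The GrowthInfo tests above describe a single word that packs the remaining growth budget together with a "some full slot became kDeleted" flag, and the flag simply sticks until the next rehash. A minimal sketch of one way to pack that, with hypothetical names and assuming growth_left always stays far below the top bit:

#include <cstddef>

// Hypothetical packing in the spirit of GrowthInfo: the top bit records that a
// tombstone was created, the remaining bits hold growth_left.
class MiniGrowthInfo {
 public:
  void InitNoDeleted(size_t growth_left) { value_ = growth_left; }
  void MarkFullAsDeleted() { value_ |= kDeletedBit; }
  // One more insert allowed; does not touch the deleted bit.
  void MarkFullAsEmpty() { ++value_; }
  // One fewer insert allowed; requires GetGrowthLeft() > 0.
  void MarkEmptyAsFull() { --value_; }
  bool HasNoDeleted() const { return (value_ & kDeletedBit) == 0; }
  size_t GetGrowthLeft() const { return value_ & ~kDeletedBit; }

 private:
  static constexpr size_t kDeletedBit = size_t{1} << (sizeof(size_t) * 8 - 1);
  size_t value_ = 0;
};
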
 TEST(Util, NormalizeCapacity) {
   EXPECT_EQ(1, NormalizeCapacity(0));
   EXPECT_EQ(1, NormalizeCapacity(1));
@@ -156,20 +255,66 @@
   EXPECT_THAT((BitMask<uint8_t, 8>(0xAA)), ElementsAre(1, 3, 5, 7));
 }
 
-TEST(BitMask, WithShift) {
+TEST(BitMask, WithShift_MatchPortable) {
   // See the non-SSE version of Group for details on what this math is for.
   uint64_t ctrl = 0x1716151413121110;
   uint64_t hash = 0x12;
-  constexpr uint64_t msbs = 0x8080808080808080ULL;
   constexpr uint64_t lsbs = 0x0101010101010101ULL;
   auto x = ctrl ^ (lsbs * hash);
-  uint64_t mask = (x - lsbs) & ~x & msbs;
+  uint64_t mask = (x - lsbs) & ~x & kMsbs8Bytes;
   EXPECT_EQ(0x0000000080800000, mask);
 
   BitMask<uint64_t, 8, 3> b(mask);
   EXPECT_EQ(*b, 2);
 }
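
The test above spells out the portable Group::Match math: broadcast the sought H2 byte across all eight control bytes, XOR, then apply the classic SWAR zero-byte test (x - lsbs) & ~x & msbs, which sets the high bit of every byte that became zero. Because of borrow propagation it may also set the bit of the byte just above a matching one; that benign false positive is visible in the expected mask below and is tolerated because candidates are re-checked. A standalone check of the identity using the same constants as the test:

#include <cassert>
#include <cstdint>

int main() {
  constexpr uint64_t kLsbs = 0x0101010101010101ULL;
  constexpr uint64_t kMsbs = 0x8080808080808080ULL;  // kMsbs8Bytes above
  const uint64_t ctrl = 0x1716151413121110ULL;  // control bytes 0x10..0x17
  const uint64_t hash = 0x12;                   // H2 value to look for
  const uint64_t x = ctrl ^ (kLsbs * hash);     // matching bytes become 0x00
  const uint64_t mask = (x - kLsbs) & ~x & kMsbs;
  // Byte 2 is a true match; byte 3's bit is the borrow-induced false positive.
  assert(mask == 0x0000000080800000ULL);
  (void)mask;
  return 0;
}
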
 
+constexpr uint64_t kSome8BytesMask = /*  */ 0x8000808080008000ULL;
+constexpr uint64_t kSome8BytesMaskAllOnes = 0xff00ffffff00ff00ULL;
+constexpr auto kSome8BytesMaskBits = std::array<int, 5>{1, 3, 4, 5, 7};
+
+
+TEST(BitMask, WithShift_FullMask) {
+  EXPECT_THAT((BitMask<uint64_t, 8, 3>(kMsbs8Bytes)),
+              ElementsAre(0, 1, 2, 3, 4, 5, 6, 7));
+  EXPECT_THAT(
+      (BitMask<uint64_t, 8, 3, /*NullifyBitsOnIteration=*/true>(kMsbs8Bytes)),
+      ElementsAre(0, 1, 2, 3, 4, 5, 6, 7));
+  EXPECT_THAT(
+      (BitMask<uint64_t, 8, 3, /*NullifyBitsOnIteration=*/true>(~uint64_t{0})),
+      ElementsAre(0, 1, 2, 3, 4, 5, 6, 7));
+}
+
+TEST(BitMask, WithShift_EmptyMask) {
+  EXPECT_THAT((BitMask<uint64_t, 8, 3>(0)), ElementsAre());
+  EXPECT_THAT((BitMask<uint64_t, 8, 3, /*NullifyBitsOnIteration=*/true>(0)),
+              ElementsAre());
+}
+
+TEST(BitMask, WithShift_SomeMask) {
+  EXPECT_THAT((BitMask<uint64_t, 8, 3>(kSome8BytesMask)),
+              ElementsAreArray(kSome8BytesMaskBits));
+  EXPECT_THAT((BitMask<uint64_t, 8, 3, /*NullifyBitsOnIteration=*/true>(
+                  kSome8BytesMask)),
+              ElementsAreArray(kSome8BytesMaskBits));
+  EXPECT_THAT((BitMask<uint64_t, 8, 3, /*NullifyBitsOnIteration=*/true>(
+                  kSome8BytesMaskAllOnes)),
+              ElementsAreArray(kSome8BytesMaskBits));
+}
+
+TEST(BitMask, WithShift_SomeMaskExtraBitsForNullify) {
+  // Verify that adding extra bits into non-zero bytes is fine.
+  uint64_t extra_bits = 77;
+  for (int i = 0; i < 100; ++i) {
+    // Add extra bits, but keep zero bytes untouched.
+    uint64_t extra_mask = extra_bits & kSome8BytesMaskAllOnes;
+    EXPECT_THAT((BitMask<uint64_t, 8, 3, /*NullifyBitsOnIteration=*/true>(
+                    kSome8BytesMask | extra_mask)),
+                ElementsAreArray(kSome8BytesMaskBits))
+        << i << " " << extra_mask;
+    extra_bits = (extra_bits + 1) * 3;
+  }
+}
+
 TEST(BitMask, LeadingTrailing) {
   EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).LeadingZeros()), 3);
   EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).TrailingZeros()), 6);
@@ -255,6 +400,25 @@
   }
 }
 
+TEST(Group, MaskNonFull) {
+  if (Group::kWidth == 16) {
+    ctrl_t group[] = {
+        ctrl_t::kEmpty, CtrlT(1),          ctrl_t::kDeleted,  CtrlT(3),
+        ctrl_t::kEmpty, CtrlT(5),          ctrl_t::kSentinel, CtrlT(7),
+        CtrlT(7),       CtrlT(5),          ctrl_t::kDeleted,  CtrlT(1),
+        CtrlT(1),       ctrl_t::kSentinel, ctrl_t::kEmpty,    CtrlT(1)};
+    EXPECT_THAT(Group{group}.MaskNonFull(),
+                ElementsAre(0, 2, 4, 6, 10, 13, 14));
+  } else if (Group::kWidth == 8) {
+    ctrl_t group[] = {ctrl_t::kEmpty,    CtrlT(1), ctrl_t::kEmpty,
+                      ctrl_t::kDeleted,  CtrlT(2), ctrl_t::kSentinel,
+                      ctrl_t::kSentinel, CtrlT(1)};
+    EXPECT_THAT(Group{group}.MaskNonFull(), ElementsAre(0, 2, 3, 5, 6));
+  } else {
+    FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
+  }
+}
+
 TEST(Group, MaskEmptyOrDeleted) {
   if (Group::kWidth == 16) {
     ctrl_t group[] = {ctrl_t::kEmpty,   CtrlT(1), ctrl_t::kEmpty,    CtrlT(3),
@@ -323,7 +487,7 @@
   }
 }
 
-template <class T, bool kTransferable = false>
+template <class T, bool kTransferable = false, bool kSoo = false>
 struct ValuePolicy {
   using slot_type = T;
   using key_type = T;
@@ -357,6 +521,13 @@
     return absl::container_internal::DecomposeValue(
         std::forward<F>(f), std::forward<Args>(args)...);
   }
+
+  template <class Hash>
+  static constexpr HashSlotFn get_hash_slot_fn() {
+    return nullptr;
+  }
+
+  static constexpr bool soo_enabled() { return kSoo; }
 };
 
 using IntPolicy = ValuePolicy<int64_t>;
@@ -364,6 +535,44 @@
 
 using TranferableIntPolicy = ValuePolicy<int64_t, /*kTransferable=*/true>;
 
+// For testing SOO.
+template <int N>
+class SizedValue {
+ public:
+  SizedValue(int64_t v) {  // NOLINT
+    vals_[0] = v;
+  }
+  SizedValue() : SizedValue(0) {}
+  SizedValue(const SizedValue&) = default;
+  SizedValue& operator=(const SizedValue&) = default;
+
+  int64_t operator*() const {
+    // Suppress erroneous uninitialized memory errors on GCC.
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
+    return vals_[0];
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+  }
+  explicit operator int() const { return **this; }
+  explicit operator int64_t() const { return **this; }
+
+  template <typename H>
+  friend H AbslHashValue(H h, SizedValue sv) {
+    return H::combine(std::move(h), *sv);
+  }
+  bool operator==(const SizedValue& rhs) const { return **this == *rhs; }
+
+ private:
+  int64_t vals_[N / sizeof(int64_t)];
+};
+template <int N, bool kSoo>
+using SizedValuePolicy =
+    ValuePolicy<SizedValue<N>, /*kTransferable=*/true, kSoo>;
+
 class StringPolicy {
   template <class F, class K, class V,
             class = typename std::enable_if<
@@ -420,6 +629,11 @@
     return apply_impl(std::forward<F>(f),
                       PairArgs(std::forward<Args>(args)...));
   }
+
+  template <class Hash>
+  static constexpr HashSlotFn get_hash_slot_fn() {
+    return nullptr;
+  }
 };
 
 struct StringHash : absl::Hash<absl::string_view> {
@@ -436,9 +650,9 @@
   using Base::Base;
 };
 
-template <typename T, bool kTransferable = false>
+template <typename T, bool kTransferable = false, bool kSoo = false>
 struct ValueTable
-    : raw_hash_set<ValuePolicy<T, kTransferable>, hash_default_hash<T>,
+    : raw_hash_set<ValuePolicy<T, kTransferable, kSoo>, hash_default_hash<T>,
                    std::equal_to<T>, std::allocator<T>> {
   using Base = typename ValueTable::raw_hash_set;
   using Base::Base;
@@ -449,6 +663,11 @@
 
 using TransferableIntTable = ValueTable<int64_t, /*kTransferable=*/true>;
 
+constexpr size_t kNonSooSize = sizeof(HeapOrSoo) + 8;
+static_assert(sizeof(SizedValue<kNonSooSize>) >= kNonSooSize, "too small");
+using NonSooIntTable = ValueTable<SizedValue<kNonSooSize>>;
+using SooIntTable = ValueTable<int64_t, /*kTransferable=*/true, /*kSoo=*/true>;
+
 template <typename T>
 struct CustomAlloc : std::allocator<T> {
   CustomAlloc() = default;
@@ -498,6 +717,16 @@
   bool* frozen;
 };
 
+template <int N>
+struct FreezableSizedValueSooTable
+    : raw_hash_set<SizedValuePolicy<N, /*kSoo=*/true>,
+                   container_internal::hash_default_hash<SizedValue<N>>,
+                   std::equal_to<SizedValue<N>>,
+                   FreezableAlloc<SizedValue<N>>> {
+  using Base = typename FreezableSizedValueSooTable::raw_hash_set;
+  using Base::Base;
+};
+
 struct BadFastHash {
   template <class T>
   size_t operator()(const T&) const {
@@ -568,20 +797,26 @@
                        std::equal_to<absl::string_view>, std::allocator<int>>));
 }
 
-TEST(Table, Empty) {
-  IntTable t;
+template <class TableType>
+class SooTest : public testing::Test {};
+
+using SooTableTypes = ::testing::Types<SooIntTable, NonSooIntTable>;
+TYPED_TEST_SUITE(SooTest, SooTableTypes);
+
+TYPED_TEST(SooTest, Empty) {
+  TypeParam t;
   EXPECT_EQ(0, t.size());
   EXPECT_TRUE(t.empty());
 }
 
-TEST(Table, LookupEmpty) {
-  IntTable t;
+TYPED_TEST(SooTest, LookupEmpty) {
+  TypeParam t;
   auto it = t.find(0);
   EXPECT_TRUE(it == t.end());
 }
 
-TEST(Table, Insert1) {
-  IntTable t;
+TYPED_TEST(SooTest, Insert1) {
+  TypeParam t;
   EXPECT_TRUE(t.find(0) == t.end());
   auto res = t.emplace(0);
   EXPECT_TRUE(res.second);
@@ -590,8 +825,8 @@
   EXPECT_THAT(*t.find(0), 0);
 }
 
-TEST(Table, Insert2) {
-  IntTable t;
+TYPED_TEST(SooTest, Insert2) {
+  TypeParam t;
   EXPECT_TRUE(t.find(0) == t.end());
   auto res = t.emplace(0);
   EXPECT_TRUE(res.second);
@@ -653,9 +888,9 @@
   EXPECT_TRUE(t.empty());
 }
 
-TEST(Table, EraseInSmallTables) {
+TYPED_TEST(SooTest, EraseInSmallTables) {
   for (int64_t size = 0; size < 64; ++size) {
-    IntTable t;
+    TypeParam t;
     for (int64_t i = 0; i < size; ++i) {
       t.insert(i);
     }
@@ -670,8 +905,8 @@
   }
 }
 
-TEST(Table, InsertWithinCapacity) {
-  IntTable t;
+TYPED_TEST(SooTest, InsertWithinCapacity) {
+  TypeParam t;
   t.reserve(10);
   const size_t original_capacity = t.capacity();
   const auto addr = [&](int i) {
@@ -704,9 +939,11 @@
 template <class TableType>
 class SmallTableResizeTest : public testing::Test {};
 
-TYPED_TEST_SUITE_P(SmallTableResizeTest);
+using SmallTableTypes =
+    ::testing::Types<IntTable, TransferableIntTable, SooIntTable>;
+TYPED_TEST_SUITE(SmallTableResizeTest, SmallTableTypes);
 
-TYPED_TEST_P(SmallTableResizeTest, InsertIntoSmallTable) {
+TYPED_TEST(SmallTableResizeTest, InsertIntoSmallTable) {
   TypeParam t;
   for (int i = 0; i < 32; ++i) {
     t.insert(i);
@@ -718,11 +955,11 @@
   }
 }
 
-TYPED_TEST_P(SmallTableResizeTest, ResizeGrowSmallTables) {
-  TypeParam t;
+TYPED_TEST(SmallTableResizeTest, ResizeGrowSmallTables) {
   for (size_t source_size = 0; source_size < 32; ++source_size) {
     for (size_t target_size = source_size; target_size < 32; ++target_size) {
       for (bool rehash : {false, true}) {
+        TypeParam t;
         for (size_t i = 0; i < source_size; ++i) {
           t.insert(static_cast<int>(i));
         }
@@ -740,15 +977,21 @@
   }
 }
 
-TYPED_TEST_P(SmallTableResizeTest, ResizeReduceSmallTables) {
-  TypeParam t;
+TYPED_TEST(SmallTableResizeTest, ResizeReduceSmallTables) {
   for (size_t source_size = 0; source_size < 32; ++source_size) {
     for (size_t target_size = 0; target_size <= source_size; ++target_size) {
+      TypeParam t;
       size_t inserted_count = std::min<size_t>(source_size, 5);
       for (size_t i = 0; i < inserted_count; ++i) {
         t.insert(static_cast<int>(i));
       }
+      const size_t minimum_capacity = t.capacity();
+      t.reserve(source_size);
       t.rehash(target_size);
+      if (target_size == 0) {
+        EXPECT_EQ(t.capacity(), minimum_capacity)
+            << "rehash(0) must resize to the minimum capacity";
+      }
       for (size_t i = 0; i < inserted_count; ++i) {
         EXPECT_TRUE(t.find(static_cast<int>(i)) != t.end());
         EXPECT_EQ(*t.find(static_cast<int>(i)), static_cast<int>(i));
@@ -757,12 +1000,6 @@
   }
 }
 
-REGISTER_TYPED_TEST_SUITE_P(SmallTableResizeTest, InsertIntoSmallTable,
-                            ResizeGrowSmallTables, ResizeReduceSmallTables);
-using SmallTableTypes = ::testing::Types<IntTable, TransferableIntTable>;
-INSTANTIATE_TYPED_TEST_SUITE_P(InstanceSmallTableResizeTest,
-                               SmallTableResizeTest, SmallTableTypes);
-
 TEST(Table, LazyEmplace) {
   StringTable t;
   bool called = false;
@@ -781,14 +1018,14 @@
   EXPECT_THAT(*it, Pair("abc", "ABC"));
 }
 
-TEST(Table, ContainsEmpty) {
-  IntTable t;
+TYPED_TEST(SooTest, ContainsEmpty) {
+  TypeParam t;
 
   EXPECT_FALSE(t.contains(0));
 }
 
-TEST(Table, Contains1) {
-  IntTable t;
+TYPED_TEST(SooTest, Contains1) {
+  TypeParam t;
 
   EXPECT_TRUE(t.insert(0).second);
   EXPECT_TRUE(t.contains(0));
@@ -798,8 +1035,8 @@
   EXPECT_FALSE(t.contains(0));
 }
 
-TEST(Table, Contains2) {
-  IntTable t;
+TYPED_TEST(SooTest, Contains2) {
+  TypeParam t;
 
   EXPECT_TRUE(t.insert(0).second);
   EXPECT_TRUE(t.contains(0));
@@ -875,6 +1112,11 @@
   static auto apply(F&& f, const T& x) -> decltype(std::forward<F>(f)(x, x)) {
     return std::forward<F>(f)(x, x);
   }
+
+  template <class Hash>
+  static constexpr HashSlotFn get_hash_slot_fn() {
+    return nullptr;
+  }
 };
 
 template <typename Hash, typename Eq>
@@ -1035,7 +1277,7 @@
 }
 
 struct Modulo1000Hash {
-  size_t operator()(int x) const { return x % 1000; }
+  size_t operator()(int64_t x) const { return static_cast<size_t>(x) % 1000; }
 };
 
 struct Modulo1000HashTable
@@ -1091,8 +1333,8 @@
   }
 }
 
-TEST(Table, InsertEraseStressTest) {
-  IntTable t;
+TYPED_TEST(SooTest, InsertEraseStressTest) {
+  TypeParam t;
   const size_t kMinElementCount = 250;
   std::deque<int> keys;
   size_t i = 0;
@@ -1120,32 +1362,33 @@
                                       Pair("DEF", "!!!")));
 }
 
-TEST(Table, LargeTable) {
-  IntTable t;
+TYPED_TEST(SooTest, LargeTable) {
+  TypeParam t;
   for (int64_t i = 0; i != 100000; ++i) t.emplace(i << 40);
-  for (int64_t i = 0; i != 100000; ++i) ASSERT_EQ(i << 40, *t.find(i << 40));
+  for (int64_t i = 0; i != 100000; ++i)
+    ASSERT_EQ(i << 40, static_cast<int64_t>(*t.find(i << 40)));
 }
 
 // Timeout if copy is quadratic as it was in Rust.
-TEST(Table, EnsureNonQuadraticAsInRust) {
+TYPED_TEST(SooTest, EnsureNonQuadraticAsInRust) {
   static const size_t kLargeSize = 1 << 15;
 
-  IntTable t;
+  TypeParam t;
   for (size_t i = 0; i != kLargeSize; ++i) {
     t.insert(i);
   }
 
   // If this is quadratic, the test will timeout.
-  IntTable t2;
+  TypeParam t2;
   for (const auto& entry : t) t2.insert(entry);
 }
 
-TEST(Table, ClearBug) {
+TYPED_TEST(SooTest, ClearBug) {
   if (SwisstableGenerationsEnabled()) {
     GTEST_SKIP() << "Generations being enabled causes extra rehashes.";
   }
 
-  IntTable t;
+  TypeParam t;
   constexpr size_t capacity = container_internal::Group::kWidth - 1;
   constexpr size_t max_size = capacity / 2 + 1;
   for (size_t i = 0; i < max_size; ++i) {
@@ -1164,11 +1407,11 @@
   // that they are probably still in the same group.  This is not strictly
   // guaranteed.
   EXPECT_LT(static_cast<size_t>(std::abs(original - second)),
-            capacity * sizeof(IntTable::value_type));
+            capacity * sizeof(typename TypeParam::value_type));
 }
 
-TEST(Table, Erase) {
-  IntTable t;
+TYPED_TEST(SooTest, Erase) {
+  TypeParam t;
   EXPECT_TRUE(t.find(0) == t.end());
   auto res = t.emplace(0);
   EXPECT_TRUE(res.second);
@@ -1178,8 +1421,8 @@
   EXPECT_TRUE(t.find(0) == t.end());
 }
 
-TEST(Table, EraseMaintainsValidIterator) {
-  IntTable t;
+TYPED_TEST(SooTest, EraseMaintainsValidIterator) {
+  TypeParam t;
   const int kNumElements = 100;
   for (int i = 0; i < kNumElements; i++) {
     EXPECT_TRUE(t.emplace(i).second);
@@ -1197,8 +1440,8 @@
   EXPECT_EQ(num_erase_calls, kNumElements);
 }
 
-TEST(Table, EraseBeginEnd) {
-  IntTable t;
+TYPED_TEST(SooTest, EraseBeginEnd) {
+  TypeParam t;
   for (int i = 0; i < 10; ++i) t.insert(i);
   EXPECT_EQ(t.size(), 10);
   t.erase(t.begin(), t.end());
@@ -1597,8 +1840,29 @@
   EXPECT_THAT(t, UnorderedElementsAre(1, 10, 3, 11, 12));
 }
 
-TEST(Table, Clear) {
-  IntTable t;
+TEST(Table, GrowthInfoDeletedBit) {
+  BadTable t;
+  EXPECT_TRUE(
+      RawHashSetTestOnlyAccess::GetCommon(t).growth_info().HasNoDeleted());
+  int64_t init_count = static_cast<int64_t>(
+      CapacityToGrowth(NormalizeCapacity(Group::kWidth + 1)));
+  for (int64_t i = 0; i < init_count; ++i) {
+    t.insert(i);
+  }
+  EXPECT_TRUE(
+      RawHashSetTestOnlyAccess::GetCommon(t).growth_info().HasNoDeleted());
+  t.erase(0);
+  EXPECT_EQ(RawHashSetTestOnlyAccess::CountTombstones(t), 1);
+  EXPECT_FALSE(
+      RawHashSetTestOnlyAccess::GetCommon(t).growth_info().HasNoDeleted());
+  t.rehash(0);
+  EXPECT_EQ(RawHashSetTestOnlyAccess::CountTombstones(t), 0);
+  EXPECT_TRUE(
+      RawHashSetTestOnlyAccess::GetCommon(t).growth_info().HasNoDeleted());
+}
+
+TYPED_TEST(SooTest, Clear) {
+  TypeParam t;
   EXPECT_TRUE(t.find(0) == t.end());
   t.clear();
   EXPECT_TRUE(t.find(0) == t.end());
@@ -1610,13 +1874,13 @@
   EXPECT_TRUE(t.find(0) == t.end());
 }
 
-TEST(Table, Swap) {
-  IntTable t;
+TYPED_TEST(SooTest, Swap) {
+  TypeParam t;
   EXPECT_TRUE(t.find(0) == t.end());
   auto res = t.emplace(0);
   EXPECT_TRUE(res.second);
   EXPECT_EQ(1, t.size());
-  IntTable u;
+  TypeParam u;
   t.swap(u);
   EXPECT_EQ(0, t.size());
   EXPECT_EQ(1, u.size());
@@ -1624,8 +1888,8 @@
   EXPECT_THAT(*u.find(0), 0);
 }
 
-TEST(Table, Rehash) {
-  IntTable t;
+TYPED_TEST(SooTest, Rehash) {
+  TypeParam t;
   EXPECT_TRUE(t.find(0) == t.end());
   t.emplace(0);
   t.emplace(1);
@@ -1636,8 +1900,8 @@
   EXPECT_THAT(*t.find(1), 1);
 }
 
-TEST(Table, RehashDoesNotRehashWhenNotNecessary) {
-  IntTable t;
+TYPED_TEST(SooTest, RehashDoesNotRehashWhenNotNecessary) {
+  TypeParam t;
   t.emplace(0);
   t.emplace(1);
   auto* p = &*t.find(0);
@@ -1645,14 +1909,15 @@
   EXPECT_EQ(p, &*t.find(0));
 }
 
+// The following two tests use a non-SOO table because they test for 0 capacity.
 TEST(Table, RehashZeroDoesNotAllocateOnEmptyTable) {
-  IntTable t;
+  NonSooIntTable t;
   t.rehash(0);
   EXPECT_EQ(0, t.bucket_count());
 }
 
 TEST(Table, RehashZeroDeallocatesEmptyTable) {
-  IntTable t;
+  NonSooIntTable t;
   t.emplace(0);
   t.clear();
   EXPECT_NE(0, t.bucket_count());
@@ -1660,8 +1925,8 @@
   EXPECT_EQ(0, t.bucket_count());
 }
 
-TEST(Table, RehashZeroForcesRehash) {
-  IntTable t;
+TYPED_TEST(SooTest, RehashZeroForcesRehash) {
+  TypeParam t;
   t.emplace(0);
   t.emplace(1);
   auto* p = &*t.find(0);
@@ -1677,27 +1942,61 @@
   StringTable t = {P(), Q(), {}, {{}, {}}};
 }
 
-TEST(Table, CopyConstruct) {
-  IntTable t;
+TYPED_TEST(SooTest, CopyConstruct) {
+  TypeParam t;
   t.emplace(0);
   EXPECT_EQ(1, t.size());
   {
-    IntTable u(t);
+    TypeParam u(t);
     EXPECT_EQ(1, u.size());
     EXPECT_THAT(*u.find(0), 0);
   }
   {
-    IntTable u{t};
+    TypeParam u{t};
     EXPECT_EQ(1, u.size());
     EXPECT_THAT(*u.find(0), 0);
   }
   {
-    IntTable u = t;
+    TypeParam u = t;
     EXPECT_EQ(1, u.size());
     EXPECT_THAT(*u.find(0), 0);
   }
 }
 
+TYPED_TEST(SooTest, CopyDifferentSizes) {
+  TypeParam t;
+
+  for (int i = 0; i < 100; ++i) {
+    t.emplace(i);
+    TypeParam c = t;
+    for (int j = 0; j <= i; ++j) {
+      ASSERT_TRUE(c.find(j) != c.end()) << "i=" << i << " j=" << j;
+    }
+    // Testing find miss to verify that table is not full.
+    ASSERT_TRUE(c.find(-1) == c.end());
+  }
+}
+
+TYPED_TEST(SooTest, CopyDifferentCapacities) {
+  for (int cap = 1; cap < 100; cap = cap * 2 + 1) {
+    TypeParam t;
+    t.reserve(static_cast<size_t>(cap));
+    for (int i = 0; i <= cap; ++i) {
+      t.emplace(i);
+      if (i != cap && i % 5 != 0) {
+        continue;
+      }
+      TypeParam c = t;
+      for (int j = 0; j <= i; ++j) {
+        ASSERT_TRUE(c.find(j) != c.end())
+            << "cap=" << cap << " i=" << i << " j=" << j;
+      }
+      // Testing find miss to verify that table is not full.
+      ASSERT_TRUE(c.find(-1) == c.end());
+    }
+  }
+}
+
 TEST(Table, CopyConstructWithAlloc) {
   StringTable t;
   t.emplace("a", "b");
@@ -1827,8 +2126,8 @@
   EXPECT_NE(u, t);
 }
 
-TEST(Table, NumDeletedRegression) {
-  IntTable t;
+TYPED_TEST(SooTest, NumDeletedRegression) {
+  TypeParam t;
   t.emplace(0);
   t.erase(t.find(0));
   // construct over a deleted slot.
@@ -1836,8 +2135,8 @@
   t.clear();
 }
 
-TEST(Table, FindFullDeletedRegression) {
-  IntTable t;
+TYPED_TEST(SooTest, FindFullDeletedRegression) {
+  TypeParam t;
   for (int i = 0; i < 1000; ++i) {
     t.emplace(i);
     t.erase(t.find(i));
@@ -1845,17 +2144,20 @@
   EXPECT_EQ(0, t.size());
 }
 
-TEST(Table, ReplacingDeletedSlotDoesNotRehash) {
+TYPED_TEST(SooTest, ReplacingDeletedSlotDoesNotRehash) {
+  // We need to disable hashtablez to avoid issues related to SOO and sampling.
+  SetHashtablezEnabled(false);
+
   size_t n;
   {
     // Compute n such that n is the maximum number of elements before rehash.
-    IntTable t;
+    TypeParam t;
     t.emplace(0);
     size_t c = t.bucket_count();
     for (n = 1; c == t.bucket_count(); ++n) t.emplace(n);
     --n;
   }
-  IntTable t;
+  TypeParam t;
   t.rehash(n);
   const size_t c = t.bucket_count();
   for (size_t i = 0; i != n; ++i) t.emplace(i);
@@ -2106,8 +2408,8 @@
   EXPECT_FALSE(node);  // NOLINT(bugprone-use-after-move)
 }
 
-TEST(Nodes, HintInsert) {
-  IntTable t = {1, 2, 3};
+TYPED_TEST(SooTest, HintInsert) {
+  TypeParam t = {1, 2, 3};
   auto node = t.extract(1);
   EXPECT_THAT(t, UnorderedElementsAre(2, 3));
   auto it = t.insert(t.begin(), std::move(node));
@@ -2126,14 +2428,18 @@
   EXPECT_TRUE(node);  // NOLINT(bugprone-use-after-move)
 }
 
-IntTable MakeSimpleTable(size_t size) {
-  IntTable t;
+template <typename T>
+T MakeSimpleTable(size_t size) {
+  T t;
   while (t.size() < size) t.insert(t.size());
   return t;
 }
 
-std::vector<int> OrderOfIteration(const IntTable& t) {
-  return {t.begin(), t.end()};
+template <typename T>
+std::vector<int> OrderOfIteration(const T& t) {
+  std::vector<int> res;
+  for (auto i : t) res.push_back(static_cast<int>(i));
+  return res;
 }
 
 // These IterationOrderChanges tests depend on non-deterministic behavior.
@@ -2142,15 +2448,15 @@
 // we are touching different memory pages to cause the ordering to change.
 // We also need to keep the old tables around to avoid getting the same memory
 // blocks over and over.
-TEST(Table, IterationOrderChangesByInstance) {
+TYPED_TEST(SooTest, IterationOrderChangesByInstance) {
   for (size_t size : {2, 6, 12, 20}) {
-    const auto reference_table = MakeSimpleTable(size);
+    const auto reference_table = MakeSimpleTable<TypeParam>(size);
     const auto reference = OrderOfIteration(reference_table);
 
-    std::vector<IntTable> tables;
+    std::vector<TypeParam> tables;
     bool found_difference = false;
     for (int i = 0; !found_difference && i < 5000; ++i) {
-      tables.push_back(MakeSimpleTable(size));
+      tables.push_back(MakeSimpleTable<TypeParam>(size));
       found_difference = OrderOfIteration(tables.back()) != reference;
     }
     if (!found_difference) {
@@ -2161,27 +2467,44 @@
   }
 }
 
-TEST(Table, IterationOrderChangesOnRehash) {
-  std::vector<IntTable> garbage;
-  for (int i = 0; i < 5000; ++i) {
-    auto t = MakeSimpleTable(20);
-    const auto reference = OrderOfIteration(t);
-    // Force rehash to the same size.
-    t.rehash(0);
-    auto trial = OrderOfIteration(t);
-    if (trial != reference) {
-      // We are done.
-      return;
+TYPED_TEST(SooTest, IterationOrderChangesOnRehash) {
+  // We test a range of sizes, with an emphasis on small ones, because small
+  // table resize takes a different codepath.
+  // Note: iteration order for size() <= 1 is always the same.
+  for (size_t size : std::vector<size_t>{2, 3, 6, 7, 12, 15, 20, 50}) {
+    for (size_t rehash_size : {
+             size_t{0},  // A forced rehash is guaranteed.
+             size * 10   // A rehash to a larger capacity is guaranteed.
+         }) {
+      std::vector<TypeParam> garbage;
+      bool ok = false;
+      for (int i = 0; i < 5000; ++i) {
+        auto t = MakeSimpleTable<TypeParam>(size);
+        const auto reference = OrderOfIteration(t);
+        // Force rehash.
+        t.rehash(rehash_size);
+        auto trial = OrderOfIteration(t);
+        if (trial != reference) {
+          // We are done.
+          ok = true;
+          break;
+        }
+        garbage.push_back(std::move(t));
+      }
+      EXPECT_TRUE(ok)
+          << "Iteration order remained the same across many attempts; size="
+          << size << " rehash_size=" << rehash_size << ".";
     }
-    garbage.push_back(std::move(t));
   }
-  FAIL() << "Iteration order remained the same across many attempts.";
 }
 
 // Verify that pointers are invalidated as soon as a second element is inserted.
 // This prevents dependency on pointer stability on small tables.
-TEST(Table, UnstablePointers) {
-  IntTable table;
+TYPED_TEST(SooTest, UnstablePointers) {
+  // We need to disable hashtablez to avoid issues related to SOO and sampling.
+  SetHashtablezEnabled(false);
+
+  TypeParam table;
 
   const auto addr = [&](int i) {
     return reinterpret_cast<uintptr_t>(&*table.find(i));
@@ -2200,11 +2523,11 @@
   if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
     GTEST_SKIP() << "Assertions not enabled.";
 
-  IntTable t;
+  NonSooIntTable t;
   // Extra simple "regexp" as regexp support is highly varied across platforms.
   EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()),
                             "erase.* called on end.. iterator.");
-  typename IntTable::iterator iter;
+  typename NonSooIntTable::iterator iter;
   EXPECT_DEATH_IF_SUPPORTED(
       ++iter, "operator.* called on default-constructed iterator.");
   t.insert(0);
@@ -2218,6 +2541,22 @@
   EXPECT_DEATH_IF_SUPPORTED(++iter, kErasedDeathMessage);
 }
 
+TEST(TableDeathTest, InvalidIteratorAssertsSoo) {
+  if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
+    GTEST_SKIP() << "Assertions not enabled.";
+
+  SooIntTable t;
+  // Extra simple "regexp" as regexp support is highly varied across platforms.
+  EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()),
+                            "erase.* called on end.. iterator.");
+  typename SooIntTable::iterator iter;
+  EXPECT_DEATH_IF_SUPPORTED(
+      ++iter, "operator.* called on default-constructed iterator.");
+
+  // We can't detect the erased-iterator case as invalid in SOO mode because
+  // the control bytes are a static constant.
+}
+
 // Invalid iterator use can trigger use-after-free in asan/hwasan,
 // use-of-uninitialized-value in msan, or invalidated iterator assertions.
 constexpr const char* kInvalidIteratorDeathMessage =
@@ -2231,11 +2570,11 @@
 constexpr bool kMsvc = false;
 #endif
 
-TEST(TableDeathTest, IteratorInvalidAssertsEqualityOperator) {
+TYPED_TEST(SooTest, IteratorInvalidAssertsEqualityOperator) {
   if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
     GTEST_SKIP() << "Assertions not enabled.";
 
-  IntTable t;
+  TypeParam t;
   t.insert(1);
   t.insert(2);
   t.insert(3);
@@ -2254,38 +2593,55 @@
   t.erase(iter2);
   EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kErasedDeathMessage);
 
-  IntTable t1, t2;
+  TypeParam t1, t2;
   t1.insert(0);
   t2.insert(0);
   iter1 = t1.begin();
   iter2 = t2.begin();
   const char* const kContainerDiffDeathMessage =
       SwisstableGenerationsEnabled()
-          ? "Invalid iterator comparison.*iterators from different hashtables"
+          ? "Invalid iterator comparison.*iterators from different.* hashtables"
           : "Invalid iterator comparison.*may be from different "
             ".*containers.*config=asan";
   EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kContainerDiffDeathMessage);
   EXPECT_DEATH_IF_SUPPORTED(void(iter2 == iter1), kContainerDiffDeathMessage);
+}
 
-  for (int i = 0; i < 10; ++i) t1.insert(i);
-  // There should have been a rehash in t1.
-  if (kMsvc) return;  // MSVC doesn't support | in regex.
+TYPED_TEST(SooTest, IteratorInvalidAssertsEqualityOperatorRehash) {
+  if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
+    GTEST_SKIP() << "Assertions not enabled.";
+  if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regex.";
+#ifdef ABSL_HAVE_THREAD_SANITIZER
+  GTEST_SKIP() << "ThreadSanitizer test runs fail on use-after-free even in "
+                  "EXPECT_DEATH.";
+#endif
 
-  // NOTE(b/293887834): After rehashing, iterators will contain pointers to
-  // freed memory, which may be detected by ThreadSanitizer.
+  TypeParam t;
+  t.insert(0);
+  auto iter = t.begin();
+
+  // Trigger a rehash in t.
+  for (int i = 0; i < 10; ++i) t.insert(i);
+
   const char* const kRehashedDeathMessage =
       SwisstableGenerationsEnabled()
           ? kInvalidIteratorDeathMessage
-          : "Invalid iterator comparison.*might have rehashed.*config=asan"
-            "|ThreadSanitizer: heap-use-after-free";
-  EXPECT_DEATH_IF_SUPPORTED(void(iter1 == t1.begin()), kRehashedDeathMessage);
+          : "Invalid iterator comparison.*might have rehashed.*config=asan";
+  EXPECT_DEATH_IF_SUPPORTED(void(iter == t.begin()), kRehashedDeathMessage);
 }
 
 #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-TEST(RawHashSamplerTest, Sample) {
+template <typename T>
+class RawHashSamplerTest : public testing::Test {};
+
+using RawHashSamplerTestTypes = ::testing::Types<SooIntTable, NonSooIntTable>;
+TYPED_TEST_SUITE(RawHashSamplerTest, RawHashSamplerTestTypes);
+
+TYPED_TEST(RawHashSamplerTest, Sample) {
+  constexpr bool soo_enabled = std::is_same<SooIntTable, TypeParam>::value;
   // Enable the feature even if the prod default is off.
   SetHashtablezEnabled(true);
-  SetHashtablezSampleParameter(100);
+  SetHashtablezSampleParameter(100);  // Sample ~1% of tables.
 
   auto& sampler = GlobalHashtablezSampler();
   size_t start_size = 0;
@@ -2295,7 +2651,7 @@
     ++start_size;
   });
 
-  std::vector<IntTable> tables;
+  std::vector<TypeParam> tables;
   for (int i = 0; i < 1000000; ++i) {
     tables.emplace_back();
 
@@ -2319,15 +2675,23 @@
   absl::flat_hash_map<size_t, int> observed_checksums;
   absl::flat_hash_map<ssize_t, int> reservations;
   end_size += sampler.Iterate([&](const HashtablezInfo& info) {
-    if (preexisting_info.count(&info) == 0) {
-      observed_checksums[info.hashes_bitwise_xor.load(
-          std::memory_order_relaxed)]++;
-      reservations[info.max_reserve.load(std::memory_order_relaxed)]++;
-    }
-    EXPECT_EQ(info.inline_element_size, sizeof(int64_t));
     ++end_size;
+    if (preexisting_info.contains(&info)) return;
+    observed_checksums[info.hashes_bitwise_xor.load(
+        std::memory_order_relaxed)]++;
+    reservations[info.max_reserve.load(std::memory_order_relaxed)]++;
+    EXPECT_EQ(info.inline_element_size, sizeof(typename TypeParam::value_type));
+    EXPECT_EQ(info.key_size, sizeof(typename TypeParam::key_type));
+    EXPECT_EQ(info.value_size, sizeof(typename TypeParam::value_type));
+
+    if (soo_enabled) {
+      EXPECT_EQ(info.soo_capacity, SooCapacity());
+    } else {
+      EXPECT_EQ(info.soo_capacity, 0);
+    }
   });
 
+  // Expect that we sampled at the requested sampling rate of ~1%.
   EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
               0.01, 0.005);
   EXPECT_EQ(observed_checksums.size(), 5);
@@ -2344,12 +2708,141 @@
         << reservation;
   }
 }
+
+std::vector<const HashtablezInfo*> SampleSooMutation(
+    absl::FunctionRef<void(SooIntTable&)> mutate_table) {
+  // Enable the feature even if the prod default is off.
+  SetHashtablezEnabled(true);
+  SetHashtablezSampleParameter(100);  // Sample ~1% of tables.
+
+  auto& sampler = GlobalHashtablezSampler();
+  size_t start_size = 0;
+  absl::flat_hash_set<const HashtablezInfo*> preexisting_info;
+  start_size += sampler.Iterate([&](const HashtablezInfo& info) {
+    preexisting_info.insert(&info);
+    ++start_size;
+  });
+
+  std::vector<SooIntTable> tables;
+  for (int i = 0; i < 1000000; ++i) {
+    tables.emplace_back();
+    mutate_table(tables.back());
+  }
+  size_t end_size = 0;
+  std::vector<const HashtablezInfo*> infos;
+  end_size += sampler.Iterate([&](const HashtablezInfo& info) {
+    ++end_size;
+    if (preexisting_info.contains(&info)) return;
+    infos.push_back(&info);
+  });
+
+  // Expect that we sampled at the requested sampling rate of ~1%.
+  EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
+              0.01, 0.005);
+  return infos;
+}
+
+TEST(RawHashSamplerTest, SooTableInsertToEmpty) {
+  if (SooIntTable().capacity() != SooCapacity()) {
+    CHECK_LT(sizeof(void*), 8) << "missing SOO coverage";
+    GTEST_SKIP() << "not SOO on this platform";
+  }
+  std::vector<const HashtablezInfo*> infos =
+      SampleSooMutation([](SooIntTable& t) { t.insert(1); });
+
+  for (const HashtablezInfo* info : infos) {
+    ASSERT_EQ(info->inline_element_size,
+              sizeof(typename SooIntTable::value_type));
+    ASSERT_EQ(info->soo_capacity, SooCapacity());
+    ASSERT_EQ(info->capacity, NextCapacity(SooCapacity()));
+    ASSERT_EQ(info->size, 1);
+    ASSERT_EQ(info->max_reserve, 0);
+    ASSERT_EQ(info->num_erases, 0);
+    ASSERT_EQ(info->max_probe_length, 0);
+    ASSERT_EQ(info->total_probe_length, 0);
+  }
+}
+
+TEST(RawHashSamplerTest, SooTableReserveToEmpty) {
+  if (SooIntTable().capacity() != SooCapacity()) {
+    CHECK_LT(sizeof(void*), 8) << "missing SOO coverage";
+    GTEST_SKIP() << "not SOO on this platform";
+  }
+  std::vector<const HashtablezInfo*> infos =
+      SampleSooMutation([](SooIntTable& t) { t.reserve(100); });
+
+  for (const HashtablezInfo* info : infos) {
+    ASSERT_EQ(info->inline_element_size,
+              sizeof(typename SooIntTable::value_type));
+    ASSERT_EQ(info->soo_capacity, SooCapacity());
+    ASSERT_GE(info->capacity, 100);
+    ASSERT_EQ(info->size, 0);
+    ASSERT_EQ(info->max_reserve, 100);
+    ASSERT_EQ(info->num_erases, 0);
+    ASSERT_EQ(info->max_probe_length, 0);
+    ASSERT_EQ(info->total_probe_length, 0);
+  }
+}
+
+// This tests that reserve on a full SOO table doesn't incorrectly result in new
+// (over-)sampling.
+TEST(RawHashSamplerTest, SooTableReserveToFullSoo) {
+  if (SooIntTable().capacity() != SooCapacity()) {
+    CHECK_LT(sizeof(void*), 8) << "missing SOO coverage";
+    GTEST_SKIP() << "not SOO on this platform";
+  }
+  std::vector<const HashtablezInfo*> infos =
+      SampleSooMutation([](SooIntTable& t) {
+        t.insert(1);
+        t.reserve(100);
+      });
+
+  for (const HashtablezInfo* info : infos) {
+    ASSERT_EQ(info->inline_element_size,
+              sizeof(typename SooIntTable::value_type));
+    ASSERT_EQ(info->soo_capacity, SooCapacity());
+    ASSERT_GE(info->capacity, 100);
+    ASSERT_EQ(info->size, 1);
+    ASSERT_EQ(info->max_reserve, 100);
+    ASSERT_EQ(info->num_erases, 0);
+    ASSERT_EQ(info->max_probe_length, 0);
+    ASSERT_EQ(info->total_probe_length, 0);
+  }
+}
+
+// This tests that rehash(0) on a sampled table whose size fits in SOO doesn't
+// incorrectly result in losing sampling.
+TEST(RawHashSamplerTest, SooTableRehashShrinkWhenSizeFitsInSoo) {
+  if (SooIntTable().capacity() != SooCapacity()) {
+    CHECK_LT(sizeof(void*), 8) << "missing SOO coverage";
+    GTEST_SKIP() << "not SOO on this platform";
+  }
+  std::vector<const HashtablezInfo*> infos =
+      SampleSooMutation([](SooIntTable& t) {
+        t.reserve(100);
+        t.insert(1);
+        EXPECT_GE(t.capacity(), 100);
+        t.rehash(0);
+      });
+
+  for (const HashtablezInfo* info : infos) {
+    ASSERT_EQ(info->inline_element_size,
+              sizeof(typename SooIntTable::value_type));
+    ASSERT_EQ(info->soo_capacity, SooCapacity());
+    ASSERT_EQ(info->capacity, NextCapacity(SooCapacity()));
+    ASSERT_EQ(info->size, 1);
+    ASSERT_EQ(info->max_reserve, 100);
+    ASSERT_EQ(info->num_erases, 0);
+    ASSERT_EQ(info->max_probe_length, 0);
+    ASSERT_EQ(info->total_probe_length, 0);
+  }
+}
 #endif  // ABSL_INTERNAL_HASHTABLEZ_SAMPLE
 
 TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
   // Enable the feature even if the prod default is off.
   SetHashtablezEnabled(true);
-  SetHashtablezSampleParameter(100);
+  SetHashtablezSampleParameter(100);  // Sample ~1% of tables.
 
   auto& sampler = GlobalHashtablezSampler();
   size_t start_size = 0;
@@ -2371,9 +2864,10 @@
 template <class TableType>
 class SanitizerTest : public testing::Test {};
 
-TYPED_TEST_SUITE_P(SanitizerTest);
+using SanitizerTableTypes = ::testing::Types<IntTable, TransferableIntTable>;
+TYPED_TEST_SUITE(SanitizerTest, SanitizerTableTypes);
 
-TYPED_TEST_P(SanitizerTest, PoisoningUnused) {
+TYPED_TEST(SanitizerTest, PoisoningUnused) {
   TypeParam t;
   for (size_t reserve_size = 2; reserve_size < 1024;
        reserve_size = reserve_size * 3 / 2) {
@@ -2391,14 +2885,10 @@
   }
 }
 
-REGISTER_TYPED_TEST_SUITE_P(SanitizerTest, PoisoningUnused);
-using SanitizerTableTypes = ::testing::Types<IntTable, TransferableIntTable>;
-INSTANTIATE_TYPED_TEST_SUITE_P(InstanceSanitizerTest, SanitizerTest,
-                               SanitizerTableTypes);
-
+// TODO(b/289225379): poison inline space when empty SOO.
 TEST(Sanitizer, PoisoningOnErase) {
-  IntTable t;
-  int64_t& v = *t.insert(0).first;
+  NonSooIntTable t;
+  auto& v = *t.insert(0).first;
 
   EXPECT_FALSE(__asan_address_is_poisoned(&v));
   t.erase(0);
@@ -2446,7 +2936,7 @@
   if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
   if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
 
-  IntTable t;
+  NonSooIntTable t;
   // Start with 1 element so that `it` is never an end iterator.
   t.insert(-1);
   for (int i = 0; i < 10; ++i) {
@@ -2493,11 +2983,11 @@
   if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
   if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
 
-  IntTable t1, t2;
+  NonSooIntTable t1, t2;
   t1.insert(1);
   auto it = t1.begin();
   // ptr will become invalidated on rehash.
-  const int64_t* ptr = &*it;
+  const auto* ptr = &*it;
   (void)ptr;
 
   t2 = std::move(t1);
@@ -2505,12 +2995,12 @@
   EXPECT_DEATH_IF_SUPPORTED(void(it == t2.begin()),
                             kInvalidIteratorDeathMessage);
 #ifdef ABSL_HAVE_ADDRESS_SANITIZER
-  EXPECT_DEATH_IF_SUPPORTED(std::cout << *ptr, "heap-use-after-free");
+  EXPECT_DEATH_IF_SUPPORTED(std::cout << **ptr, "heap-use-after-free");
 #endif
 }
 
-TEST(Table, ReservedGrowthUpdatesWhenTableDoesntGrow) {
-  IntTable t;
+TYPED_TEST(SooTest, ReservedGrowthUpdatesWhenTableDoesntGrow) {
+  TypeParam t;
   for (int i = 0; i < 8; ++i) t.insert(i);
   // Want to insert twice without invalidating iterators so reserve.
   const size_t cap = t.capacity();
@@ -2524,6 +3014,213 @@
   EXPECT_EQ(*it, 0);
 }
 
+template <class TableType>
+class InstanceTrackerTest : public testing::Test {};
+
+using ::absl::test_internal::CopyableMovableInstance;
+using ::absl::test_internal::InstanceTracker;
+
+struct InstanceTrackerHash {
+  size_t operator()(const CopyableMovableInstance& t) const {
+    return absl::HashOf(t.value());
+  }
+};
+
+using InstanceTrackerTableTypes = ::testing::Types<
+    absl::node_hash_set<CopyableMovableInstance, InstanceTrackerHash>,
+    absl::flat_hash_set<CopyableMovableInstance, InstanceTrackerHash>>;
+TYPED_TEST_SUITE(InstanceTrackerTest, InstanceTrackerTableTypes);
+
+TYPED_TEST(InstanceTrackerTest, EraseIfAll) {
+  using Table = TypeParam;
+  InstanceTracker tracker;
+  for (int size = 0; size < 100; ++size) {
+    Table t;
+    for (int i = 0; i < size; ++i) {
+      t.emplace(i);
+    }
+    absl::erase_if(t, [](const auto&) { return true; });
+    ASSERT_EQ(t.size(), 0);
+  }
+  EXPECT_EQ(tracker.live_instances(), 0);
+}
+
+TYPED_TEST(InstanceTrackerTest, EraseIfNone) {
+  using Table = TypeParam;
+  InstanceTracker tracker;
+  {
+    Table t;
+    for (size_t size = 0; size < 100; ++size) {
+      absl::erase_if(t, [](const auto&) { return false; });
+      ASSERT_EQ(t.size(), size);
+      t.emplace(size);
+    }
+  }
+  EXPECT_EQ(tracker.live_instances(), 0);
+}
+
+TYPED_TEST(InstanceTrackerTest, EraseIfPartial) {
+  using Table = TypeParam;
+  InstanceTracker tracker;
+  for (int mod : {0, 1}) {
+    for (int size = 0; size < 100; ++size) {
+      SCOPED_TRACE(absl::StrCat(mod, " ", size));
+      Table t;
+      std::vector<CopyableMovableInstance> expected;
+      for (int i = 0; i < size; ++i) {
+        t.emplace(i);
+        if (i % 2 != mod) {
+          expected.emplace_back(i);
+        }
+      }
+      absl::erase_if(t, [mod](const auto& x) { return x.value() % 2 == mod; });
+      ASSERT_THAT(t, testing::UnorderedElementsAreArray(expected));
+    }
+  }
+  EXPECT_EQ(tracker.live_instances(), 0);
+}
+
+TYPED_TEST(SooTest, EraseIfAll) {
+  auto pred = [](const auto&) { return true; };
+  for (int size = 0; size < 100; ++size) {
+    TypeParam t;
+    for (int i = 0; i < size; ++i) t.insert(i);
+    absl::container_internal::EraseIf(pred, &t);
+    ASSERT_EQ(t.size(), 0);
+  }
+}
+
+TYPED_TEST(SooTest, EraseIfNone) {
+  auto pred = [](const auto&) { return false; };
+  TypeParam t;
+  for (size_t size = 0; size < 100; ++size) {
+    absl::container_internal::EraseIf(pred, &t);
+    ASSERT_EQ(t.size(), size);
+    t.insert(size);
+  }
+}
+
+TYPED_TEST(SooTest, EraseIfPartial) {
+  for (int mod : {0, 1}) {
+    auto pred = [&](const auto& x) {
+      return static_cast<int64_t>(x) % 2 == mod;
+    };
+    for (int size = 0; size < 100; ++size) {
+      SCOPED_TRACE(absl::StrCat(mod, " ", size));
+      TypeParam t;
+      std::vector<int64_t> expected;
+      for (int i = 0; i < size; ++i) {
+        t.insert(i);
+        if (i % 2 != mod) {
+          expected.push_back(i);
+        }
+      }
+      absl::container_internal::EraseIf(pred, &t);
+      ASSERT_THAT(t, testing::UnorderedElementsAreArray(expected));
+    }
+  }
+}
+
+TYPED_TEST(SooTest, ForEach) {
+  TypeParam t;
+  std::vector<int64_t> expected;
+  for (int size = 0; size < 100; ++size) {
+    SCOPED_TRACE(size);
+    {
+      SCOPED_TRACE("mutable iteration");
+      std::vector<int64_t> actual;
+      auto f = [&](auto& x) { actual.push_back(static_cast<int64_t>(x)); };
+      absl::container_internal::ForEach(f, &t);
+      ASSERT_THAT(actual, testing::UnorderedElementsAreArray(expected));
+    }
+    {
+      SCOPED_TRACE("const iteration");
+      std::vector<int64_t> actual;
+      auto f = [&](auto& x) {
+        static_assert(
+            std::is_const<std::remove_reference_t<decltype(x)>>::value,
+            "no mutable values should be passed to const ForEach");
+        actual.push_back(static_cast<int64_t>(x));
+      };
+      const auto& ct = t;
+      absl::container_internal::ForEach(f, &ct);
+      ASSERT_THAT(actual, testing::UnorderedElementsAreArray(expected));
+    }
+    t.insert(size);
+    expected.push_back(size);
+  }
+}
+
+TEST(Table, ForEachMutate) {
+  StringTable t;
+  using ValueType = StringTable::value_type;
+  std::vector<ValueType> expected;
+  for (int size = 0; size < 100; ++size) {
+    SCOPED_TRACE(size);
+    std::vector<ValueType> actual;
+    auto f = [&](ValueType& x) {
+      actual.push_back(x);
+      x.second += "a";
+    };
+    absl::container_internal::ForEach(f, &t);
+    ASSERT_THAT(actual, testing::UnorderedElementsAreArray(expected));
+    for (ValueType& v : expected) {
+      v.second += "a";
+    }
+    ASSERT_THAT(t, testing::UnorderedElementsAreArray(expected));
+    t.emplace(std::to_string(size), std::to_string(size));
+    expected.emplace_back(std::to_string(size), std::to_string(size));
+  }
+}
+
+TYPED_TEST(SooTest, EraseIfReentryDeath) {
+  if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
+
+  auto erase_if_with_removal_reentrance = [](size_t reserve_size) {
+    TypeParam t;
+    t.reserve(reserve_size);
+    int64_t first_value = -1;
+    t.insert(1024);
+    t.insert(5078);
+    auto pred = [&](const auto& x) {
+      if (first_value == -1) {
+        first_value = static_cast<int64_t>(x);
+        return false;
+      }
+      // We erase on the second call to `pred` to reduce the chance that the
+      // assertion will fire inside IterateOverFullSlots.
+      t.erase(first_value);
+      return true;
+    };
+    absl::container_internal::EraseIf(pred, &t);
+  };
+  // Removal will likely happen in a different group.
+  EXPECT_DEATH_IF_SUPPORTED(erase_if_with_removal_reentrance(1024 * 16),
+                            "hash table was modified unexpectedly");
+  // Removal will happen in the same group.
+  EXPECT_DEATH_IF_SUPPORTED(
+      erase_if_with_removal_reentrance(CapacityToGrowth(Group::kWidth - 1)),
+      "hash table was modified unexpectedly");
+}
+
+// This test is useful for exercising the SOO branch.
+TYPED_TEST(SooTest, EraseIfReentrySingleElementDeath) {
+  if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
+
+  auto erase_if_with_removal_reentrance = []() {
+    TypeParam t;
+    t.insert(1024);
+    auto pred = [&](const auto& x) {
+      // We erase the element itself in order to confuse erase_if.
+      t.erase(static_cast<int64_t>(x));
+      return false;
+    };
+    absl::container_internal::EraseIf(pred, &t);
+  };
+  EXPECT_DEATH_IF_SUPPORTED(erase_if_with_removal_reentrance(),
+                            "hash table was modified unexpectedly");
+}
+
 TEST(Table, EraseBeginEndResetsReservedGrowth) {
   bool frozen = false;
   BadHashFreezableIntTable t{FreezableAlloc<int64_t>(&frozen)};
@@ -2534,7 +3231,8 @@
   for (int i = 0; i < 10; ++i) {
     // Create a long run (hash function returns constant).
     for (int j = 0; j < 100; ++j) t.insert(j);
-    // Erase elements from the middle of the long run, which creates tombstones.
+    // Erase elements from the middle of the long run, which creates
+    // tombstones.
     for (int j = 30; j < 60; ++j) t.erase(j);
     EXPECT_EQ(t.size(), 70);
     EXPECT_EQ(t.capacity(), cap);
@@ -2552,7 +3250,7 @@
   if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
   if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
 
-  IntTable t;
+  NonSooIntTable t;
   for (int i = 0; i < 1000; ++i) t.insert(i);
   t.reserve(t.size() + 100);
 
@@ -2570,24 +3268,24 @@
   GTEST_SKIP() << "MSan fails to detect some of these rehashes.";
 #endif
 
-  IntTable t;
+  NonSooIntTable t;
   t.insert(0);
   // Rehashing is guaranteed on every insertion while capacity is less than
   // RehashProbabilityConstant().
-  int64_t i = 0;
+  int i = 0;
   while (t.capacity() <= RehashProbabilityConstant()) {
     // ptr will become invalidated on rehash.
-    const int64_t* ptr = &*t.begin();
+    const auto* ptr = &*t.begin();
     t.insert(++i);
-    EXPECT_DEATH_IF_SUPPORTED(std::cout << *ptr, "use-after-free") << i;
+    EXPECT_DEATH_IF_SUPPORTED(std::cout << **ptr, "use-after-free") << i;
   }
 }
 
 TEST(Iterator, InvalidComparisonDifferentTables) {
   if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
 
-  IntTable t1, t2;
-  IntTable::iterator default_constructed_iter;
+  NonSooIntTable t1, t2;
+  NonSooIntTable::iterator default_constructed_iter;
   // We randomly use one of N empty generations for generations from empty
   // hashtables. In general, we won't always detect when iterators from
   // different empty hashtables are compared, but in this test case, we
@@ -2616,7 +3314,7 @@
 TEST(Table, AllocatorPropagation) { TestAllocPropagation<RawHashSetAlloc>(); }
 
 struct CountedHash {
-  size_t operator()(int value) const {
+  size_t operator()(int64_t value) const {
     ++count;
     return static_cast<size_t>(value);
   }
@@ -2678,6 +3376,224 @@
   }
 }
 
+// IterateOverFullSlots doesn't support SOO.
+TEST(Table, IterateOverFullSlotsEmpty) {
+  NonSooIntTable t;
+  auto fail_if_any = [](const ctrl_t*, auto* i) {
+    FAIL() << "expected no slots " << **i;
+  };
+  container_internal::IterateOverFullSlots(
+      RawHashSetTestOnlyAccess::GetCommon(t),
+      RawHashSetTestOnlyAccess::GetSlots(t), fail_if_any);
+  for (size_t i = 0; i < 256; ++i) {
+    t.reserve(i);
+    container_internal::IterateOverFullSlots(
+        RawHashSetTestOnlyAccess::GetCommon(t),
+        RawHashSetTestOnlyAccess::GetSlots(t), fail_if_any);
+  }
+}
+
+TEST(Table, IterateOverFullSlotsFull) {
+  NonSooIntTable t;
+
+  std::vector<int64_t> expected_slots;
+  for (int64_t idx = 0; idx < 128; ++idx) {
+    t.insert(idx);
+    expected_slots.push_back(idx);
+
+    std::vector<int64_t> slots;
+    container_internal::IterateOverFullSlots(
+        RawHashSetTestOnlyAccess::GetCommon(t),
+        RawHashSetTestOnlyAccess::GetSlots(t),
+        [&t, &slots](const ctrl_t* ctrl, auto* i) {
+          ptrdiff_t ctrl_offset =
+              ctrl - RawHashSetTestOnlyAccess::GetCommon(t).control();
+          ptrdiff_t slot_offset = i - RawHashSetTestOnlyAccess::GetSlots(t);
+          ASSERT_EQ(ctrl_offset, slot_offset);
+          slots.push_back(**i);
+        });
+    EXPECT_THAT(slots, testing::UnorderedElementsAreArray(expected_slots));
+  }
+}
+
+TEST(Table, IterateOverFullSlotsDeathOnRemoval) {
+  if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
+
+  auto iterate_with_reentrant_removal = [](int64_t size,
+                                           int64_t reserve_size = -1) {
+    if (reserve_size == -1) reserve_size = size;
+    for (int64_t idx = 0; idx < size; ++idx) {
+      NonSooIntTable t;
+      t.reserve(static_cast<size_t>(reserve_size));
+      for (int val = 0; val <= idx; ++val) {
+        t.insert(val);
+      }
+
+      container_internal::IterateOverFullSlots(
+          RawHashSetTestOnlyAccess::GetCommon(t),
+          RawHashSetTestOnlyAccess::GetSlots(t),
+          [&t](const ctrl_t*, auto* i) {
+            int64_t value = **i;
+            // Erase the other element of the {2*k, 2*k+1} pair.
+            t.erase(value ^ 1);
+          });
+    }
+  };
+
+  EXPECT_DEATH_IF_SUPPORTED(iterate_with_reentrant_removal(128),
+                            "hash table was modified unexpectedly");
+  // Removal will likely happen in a different group.
+  EXPECT_DEATH_IF_SUPPORTED(iterate_with_reentrant_removal(14, 1024 * 16),
+                            "hash table was modified unexpectedly");
+  // Removal will happen in the same group.
+  EXPECT_DEATH_IF_SUPPORTED(iterate_with_reentrant_removal(static_cast<int64_t>(
+                                CapacityToGrowth(Group::kWidth - 1))),
+                            "hash table was modified unexpectedly");
+}
+
+TEST(Table, IterateOverFullSlotsDeathOnInsert) {
+  if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
+
+  auto iterate_with_reentrant_insert = [](int64_t reserve_size,
+                                          int64_t size_divisor = 2) {
+    int64_t size = reserve_size / size_divisor;
+    for (int64_t idx = 1; idx <= size; ++idx) {
+      NonSooIntTable t;
+      t.reserve(static_cast<size_t>(reserve_size));
+      for (int val = 1; val <= idx; ++val) {
+        t.insert(val);
+      }
+
+      container_internal::IterateOverFullSlots(
+          RawHashSetTestOnlyAccess::GetCommon(t),
+          RawHashSetTestOnlyAccess::GetSlots(t),
+          [&t](const ctrl_t*, auto* i) {
+            int64_t value = **i;
+            t.insert(-value);
+          });
+    }
+  };
+
+  EXPECT_DEATH_IF_SUPPORTED(iterate_with_reentrant_insert(128),
+                            "hash table was modified unexpectedly");
+  // Insert will likely happen in a different group.
+  EXPECT_DEATH_IF_SUPPORTED(iterate_with_reentrant_insert(1024 * 16, 1024 * 2),
+                            "hash table was modified unexpectedly");
+  // Insert will happen in the same group.
+  EXPECT_DEATH_IF_SUPPORTED(iterate_with_reentrant_insert(static_cast<int64_t>(
+                                CapacityToGrowth(Group::kWidth - 1))),
+                            "hash table was modified unexpectedly");
+}
+
+template <typename T>
+class SooTable : public testing::Test {};
+using FreezableSooTableTypes =
+    ::testing::Types<FreezableSizedValueSooTable<8>,
+                     FreezableSizedValueSooTable<16>>;
+TYPED_TEST_SUITE(SooTable, FreezableSooTableTypes);
+
+TYPED_TEST(SooTable, Basic) {
+  bool frozen = true;
+  TypeParam t{FreezableAlloc<typename TypeParam::value_type>(&frozen)};
+  if (t.capacity() != SooCapacity()) {
+    CHECK_LT(sizeof(void*), 8) << "missing SOO coverage";
+    GTEST_SKIP() << "not SOO on this platform";
+  }
+
+  t.insert(0);
+  EXPECT_EQ(t.capacity(), 1);
+  auto it = t.find(0);
+  EXPECT_EQ(it, t.begin());
+  ASSERT_NE(it, t.end());
+  EXPECT_EQ(*it, 0);
+  EXPECT_EQ(++it, t.end());
+  EXPECT_EQ(t.find(1), t.end());
+  EXPECT_EQ(t.size(), 1);
+
+  t.erase(0);
+  EXPECT_EQ(t.size(), 0);
+  t.insert(1);
+  it = t.find(1);
+  EXPECT_EQ(it, t.begin());
+  ASSERT_NE(it, t.end());
+  EXPECT_EQ(*it, 1);
+
+  t.clear();
+  EXPECT_EQ(t.size(), 0);
+}
+
+TEST(Table, RehashToSooUnsampled) {
+  SooIntTable t;
+  if (t.capacity() != SooCapacity()) {
+    CHECK_LT(sizeof(void*), 8) << "missing SOO coverage";
+    GTEST_SKIP() << "not SOO on this platform";
+  }
+
+  // We disable hashtablez sampling for this test to ensure that the table isn't
+  // sampled. When the table is sampled, it won't rehash down to SOO.
+  SetHashtablezEnabled(false);
+
+  t.reserve(100);
+  t.insert(0);
+  EXPECT_EQ(*t.begin(), 0);
+
+  t.rehash(0);  // Rehash back down to SOO table.
+
+  EXPECT_EQ(t.capacity(), SooCapacity());
+    // Test a find miss to verify that the table is not full.
+  EXPECT_EQ(*t.begin(), 0);
+  EXPECT_EQ(t.find(0), t.begin());
+  EXPECT_EQ(t.find(1), t.end());
+}
+
+TEST(Table, ReserveToNonSoo) {
+  for (int reserve_capacity : {8, 100000}) {
+    SooIntTable t;
+    t.insert(0);
+
+    t.reserve(reserve_capacity);
+
+    EXPECT_EQ(t.find(0), t.begin());
+    EXPECT_EQ(t.size(), 1);
+    EXPECT_EQ(*t.begin(), 0);
+    EXPECT_EQ(t.find(1), t.end());
+  }
+}
+      // Test a find miss to verify that the table is not full.
+struct InconsistentHashEqType {
+  InconsistentHashEqType(int v1, int v2) : v1(v1), v2(v2) {}
+  template <typename H>
+  friend H AbslHashValue(H h, InconsistentHashEqType t) {
+    return H::combine(std::move(h), t.v1);
+  }
+  bool operator==(InconsistentHashEqType t) const { return v2 == t.v2; }
+  int v1, v2;
+};
+
+TEST(Iterator, InconsistentHashEqFunctorsValidation) {
+  if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
+
+  ValueTable<InconsistentHashEqType> t;
+  for (int i = 0; i < 10; ++i) t.insert({i, i});
+  // We need to find/insert multiple times to guarantee that the assertion
+  // fires, because the hash may collide with the already-inserted element
+  // that has v2==0. In that case, the new element won't be inserted.
+  auto find_conflicting_elems = [&] {
+    for (int i = 100; i < 20000; ++i) {
+      EXPECT_EQ(t.find({i, 0}), t.end());
+    }
+  };
+  EXPECT_DEATH_IF_SUPPORTED(find_conflicting_elems(),
+                            "hash/eq functors are inconsistent.");
+  auto insert_conflicting_elems = [&] {
+    for (int i = 100; i < 20000; ++i) {
+      EXPECT_EQ(t.insert({i, 0}).second, false);
+    }
+  };
+  EXPECT_DEATH_IF_SUPPORTED(insert_conflicting_elems(),
+                            "hash/eq functors are inconsistent.");
+}
+
 }  // namespace
 }  // namespace container_internal
 ABSL_NAMESPACE_END
diff --git a/absl/container/node_hash_map.h b/absl/container/node_hash_map.h
index a396de2..5615e49 100644
--- a/absl/container/node_hash_map.h
+++ b/absl/container/node_hash_map.h
@@ -32,21 +32,25 @@
 // migration, because it guarantees pointer stability. Consider migrating to
 // `node_hash_map` and perhaps converting to a more efficient `flat_hash_map`
 // upon further review.
+//
+// `node_hash_map` is not exception-safe.
 
 #ifndef ABSL_CONTAINER_NODE_HASH_MAP_H_
 #define ABSL_CONTAINER_NODE_HASH_MAP_H_
 
-#include <tuple>
+#include <cstddef>
+#include <memory>
 #include <type_traits>
 #include <utility>
 
 #include "absl/algorithm/container.h"
-#include "absl/base/macros.h"
+#include "absl/base/attributes.h"
+#include "absl/container/hash_container_defaults.h"
 #include "absl/container/internal/container_memory.h"
-#include "absl/container/internal/hash_function_defaults.h"  // IWYU pragma: export
 #include "absl/container/internal/node_slot_policy.h"
 #include "absl/container/internal/raw_hash_map.h"  // IWYU pragma: export
 #include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -66,7 +70,7 @@
 //
 // * Supports heterogeneous lookup, through `find()`, `operator[]()` and
 //   `insert()`, provided that the map is provided a compatible heterogeneous
-//   hashing function and equality operator.
+//   hashing function and equality operator. See below for details.
 // * Contains a `capacity()` member function indicating the number of element
 //   slots (open, deleted, and empty) within the hash map.
 // * Returns `void` from the `erase(iterator)` overload.
@@ -82,6 +86,19 @@
 // libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may
 // be randomized across dynamically loaded libraries.
 //
+// To achieve heterogeneous lookup for custom types, either the `Hash` and
+// `Eq` type parameters can be supplied or the key type should declare public
+// nested types `absl_container_hash` and (optionally) `absl_container_eq`. In
+// either case, `typename Hash::is_transparent` and
+// `typename Eq::is_transparent` should be well-formed. Both types are
+// essentially functors:
+// * `Hash` should support `size_t operator()(U val) const` that returns a
+//   hash for the given `val`.
+// * `Eq` should support `bool operator()(U lhs, V rhs) const` that returns
+//   true if `lhs` is equal to `rhs`.
+//
+// In most cases the key type needs to provide only `absl_container_hash`; in
+// that case, `std::equal_to<void>` is used in place of the `Eq` part.
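+//
+// As a minimal illustrative sketch (the `StringHash` and `StringEq` names are
+// placeholders, not part of this library), heterogeneous lookup can be
+// enabled through the `Hash`/`Eq` parameters like so:
+//
+//   struct StringHash {
+//     using is_transparent = void;
+//     size_t operator()(absl::string_view v) const {
+//       return absl::HashOf(v);
+//     }
+//   };
+//   struct StringEq {
+//     using is_transparent = void;
+//     bool operator()(absl::string_view a, absl::string_view b) const {
+//       return a == b;
+//     }
+//   };
+//
+//   absl::node_hash_map<std::string, int, StringHash, StringEq> m;
+//   m.insert({"dewey", 1});
+//   auto it = m.find(absl::string_view("dewey"));  // heterogeneous lookup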
+//
 // Example:
 //
 //   // Create a node hash map of three strings (that map to strings)
@@ -100,11 +117,10 @@
 //  if (result != ducks.end()) {
 //    std::cout << "Result: " << result->second << std::endl;
 //  }
-template <class Key, class Value,
-          class Hash = absl::container_internal::hash_default_hash<Key>,
-          class Eq = absl::container_internal::hash_default_eq<Key>,
+template <class Key, class Value, class Hash = DefaultHashContainerHash<Key>,
+          class Eq = DefaultHashContainerEq<Key>,
           class Alloc = std::allocator<std::pair<const Key, Value>>>
-class node_hash_map
+class ABSL_INTERNAL_ATTRIBUTE_OWNER node_hash_map
     : public absl::container_internal::raw_hash_map<
           absl::container_internal::NodeHashMapPolicy<Key, Value>, Hash, Eq,
           Alloc> {
@@ -544,6 +560,38 @@
 
 namespace container_internal {
 
+// c_for_each_fast(node_hash_map<>, Function)
+//
+// Container-based version of the <algorithm> `std::for_each()` function to
+// apply a function to a container's elements.
+// There are no guarantees on the order of the function calls.
+// Erasing and/or inserting elements inside the function is not allowed.
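+//
+// A brief usage sketch (the values below are illustrative only):
+//
+//   absl::node_hash_map<std::string, int> ducks = {{"a", 1}, {"b", 2}};
+//   int total = 0;
+//   absl::container_internal::c_for_each_fast(
+//       ducks, [&](const std::pair<const std::string, int>& p) {
+//         total += p.second;
+//       });
+//   // total == 3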
+template <typename K, typename V, typename H, typename E, typename A,
+          typename Function>
+decay_t<Function> c_for_each_fast(const node_hash_map<K, V, H, E, A>& c,
+                                  Function&& f) {
+  container_internal::ForEach(f, &c);
+  return f;
+}
+template <typename K, typename V, typename H, typename E, typename A,
+          typename Function>
+decay_t<Function> c_for_each_fast(node_hash_map<K, V, H, E, A>& c,
+                                  Function&& f) {
+  container_internal::ForEach(f, &c);
+  return f;
+}
+template <typename K, typename V, typename H, typename E, typename A,
+          typename Function>
+decay_t<Function> c_for_each_fast(node_hash_map<K, V, H, E, A>&& c,
+                                  Function&& f) {
+  container_internal::ForEach(f, &c);
+  return f;
+}
+
+}  // namespace container_internal
+
+namespace container_internal {
+
 template <class Key, class Value>
 class NodeHashMapPolicy
     : public absl::container_internal::node_slot_policy<
@@ -590,6 +638,13 @@
 
   static Value& value(value_type* elem) { return elem->second; }
   static const Value& value(const value_type* elem) { return elem->second; }
+
+  template <class Hash>
+  static constexpr HashSlotFn get_hash_slot_fn() {
+    return memory_internal::IsLayoutCompatible<Key, Value>::value
+               ? &TypeErasedDerefAndApplyToSlotFn<Hash, Key>
+               : nullptr;
+  }
 };
 }  // namespace container_internal
 
diff --git a/absl/container/node_hash_map_test.cc b/absl/container/node_hash_map_test.cc
index 9bcf470..4ad5d0d 100644
--- a/absl/container/node_hash_map_test.cc
+++ b/absl/container/node_hash_map_test.cc
@@ -14,6 +14,18 @@
 
 #include "absl/container/node_hash_map.h"
 
+#include <cstddef>
+#include <new>
+#include <string>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+#include "absl/container/internal/hash_policy_testing.h"
 #include "absl/container/internal/tracked.h"
 #include "absl/container/internal/unordered_map_constructor_test.h"
 #include "absl/container/internal/unordered_map_lookup_test.h"
@@ -29,6 +41,7 @@
 using ::testing::IsEmpty;
 using ::testing::Pair;
 using ::testing::UnorderedElementsAre;
+using ::testing::UnorderedElementsAreArray;
 
 using MapTypes = ::testing::Types<
     absl::node_hash_map<int, int, StatefulTestingHash, StatefulTestingEqual,
@@ -257,6 +270,58 @@
   }
 }
 
+TEST(NodeHashMap, CForEach) {
+  node_hash_map<int, int> m;
+  std::vector<std::pair<int, int>> expected;
+  for (int i = 0; i < 100; ++i) {
+    {
+      SCOPED_TRACE("mutable object iteration");
+      std::vector<std::pair<int, int>> v;
+      absl::container_internal::c_for_each_fast(
+          m, [&v](std::pair<const int, int>& p) { v.push_back(p); });
+      EXPECT_THAT(v, UnorderedElementsAreArray(expected));
+    }
+    {
+      SCOPED_TRACE("const object iteration");
+      std::vector<std::pair<int, int>> v;
+      const node_hash_map<int, int>& cm = m;
+      absl::container_internal::c_for_each_fast(
+          cm, [&v](const std::pair<const int, int>& p) { v.push_back(p); });
+      EXPECT_THAT(v, UnorderedElementsAreArray(expected));
+    }
+    {
+      SCOPED_TRACE("temporary object iteration");
+      std::vector<std::pair<int, int>> v;
+      absl::container_internal::c_for_each_fast(
+          node_hash_map<int, int>(m),
+          [&v](std::pair<const int, int>& p) { v.push_back(p); });
+      EXPECT_THAT(v, UnorderedElementsAreArray(expected));
+    }
+    m[i] = i;
+    expected.emplace_back(i, i);
+  }
+}
+
+TEST(NodeHashMap, CForEachMutate) {
+  node_hash_map<int, int> s;
+  std::vector<std::pair<int, int>> expected;
+  for (int i = 0; i < 100; ++i) {
+    std::vector<std::pair<int, int>> v;
+    absl::container_internal::c_for_each_fast(
+        s, [&v](std::pair<const int, int>& p) {
+          v.push_back(p);
+          p.second++;
+        });
+    EXPECT_THAT(v, UnorderedElementsAreArray(expected));
+    for (auto& p : expected) {
+      p.second++;
+    }
+    EXPECT_THAT(s, UnorderedElementsAreArray(expected));
+    s[i] = i;
+    expected.emplace_back(i, i);
+  }
+}
+
 // This test requires std::launder for mutable key access in node handles.
 #if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
 TEST(NodeHashMap, NodeHandleMutableKeyAccess) {
diff --git a/absl/container/node_hash_set.h b/absl/container/node_hash_set.h
index 421ff46..53435ae 100644
--- a/absl/container/node_hash_set.h
+++ b/absl/container/node_hash_set.h
@@ -31,18 +31,24 @@
 // `node_hash_set` should be an easy migration. Consider migrating to
 // `node_hash_set` and perhaps converting to a more efficient `flat_hash_set`
 // upon further review.
+//
+// `node_hash_set` is not exception-safe.
 
 #ifndef ABSL_CONTAINER_NODE_HASH_SET_H_
 #define ABSL_CONTAINER_NODE_HASH_SET_H_
 
+#include <cstddef>
+#include <memory>
 #include <type_traits>
 
 #include "absl/algorithm/container.h"
-#include "absl/base/macros.h"
-#include "absl/container/internal/hash_function_defaults.h"  // IWYU pragma: export
+#include "absl/base/attributes.h"
+#include "absl/container/hash_container_defaults.h"
+#include "absl/container/internal/container_memory.h"
 #include "absl/container/internal/node_slot_policy.h"
 #include "absl/container/internal/raw_hash_set.h"  // IWYU pragma: export
 #include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -62,7 +68,7 @@
 //
 // * Supports heterogeneous lookup, through `find()`, `operator[]()` and
 //   `insert()`, provided that the set is provided a compatible heterogeneous
-//   hashing function and equality operator.
+//   hashing function and equality operator. See below for details.
 // * Contains a `capacity()` member function indicating the number of element
 //   slots (open, deleted, and empty) within the hash set.
 // * Returns `void` from the `erase(iterator)` overload.
@@ -78,6 +84,19 @@
 // libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may
 // be randomized across dynamically loaded libraries.
 //
+// To achieve heterogeneous lookup for custom types, either the `Hash` and
+// `Eq` type parameters can be supplied or `T` should declare public nested
+// types `absl_container_hash` and (optionally) `absl_container_eq`. In either
+// case, `typename Hash::is_transparent` and `typename Eq::is_transparent`
+// should be well-formed. Both types are essentially functors:
+// * `Hash` should support `size_t operator()(U val) const` that returns a
+//   hash for the given `val`.
+// * `Eq` should support `bool operator()(U lhs, V rhs) const` that returns
+//   true if `lhs` is equal to `rhs`.
+//
+// In most cases `T` needs to provide only `absl_container_hash`; in that
+// case, `std::equal_to<void>` is used in place of the `Eq` part.
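+//
+// As a minimal illustrative sketch (the `MyName` and `MyNameHash` types are
+// placeholders, not part of this library), the nested-type route looks like:
+//
+//   struct MyNameHash;  // defined below, once `MyName` is complete
+//
+//   struct MyName {
+//     std::string value;
+//     using absl_container_hash = MyNameHash;
+//     friend bool operator==(const MyName& a, const MyName& b) {
+//       return a.value == b.value;
+//     }
+//     friend bool operator==(const MyName& a, absl::string_view b) {
+//       return a.value == b;
+//     }
+//   };
+//
+//   struct MyNameHash {
+//     using is_transparent = void;
+//     size_t operator()(absl::string_view v) const {
+//       return absl::HashOf(v);
+//     }
+//     size_t operator()(const MyName& n) const {
+//       return absl::HashOf(n.value);
+//     }
+//   };
+//
+//   absl::node_hash_set<MyName> names;
+//   names.insert(MyName{"louie"});
+//   bool found = names.contains(absl::string_view("louie"));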
+//
 // Example:
 //
 //   // Create a node hash set of three strings
@@ -94,10 +113,9 @@
 //  if (ducks.contains("dewey")) {
 //    std::cout << "We found dewey!" << std::endl;
 //  }
-template <class T, class Hash = absl::container_internal::hash_default_hash<T>,
-          class Eq = absl::container_internal::hash_default_eq<T>,
-          class Alloc = std::allocator<T>>
-class node_hash_set
+template <class T, class Hash = DefaultHashContainerHash<T>,
+          class Eq = DefaultHashContainerEq<T>, class Alloc = std::allocator<T>>
+class ABSL_INTERNAL_ATTRIBUTE_OWNER node_hash_set
     : public absl::container_internal::raw_hash_set<
           absl::container_internal::NodeHashSetPolicy<T>, Hash, Eq, Alloc> {
   using Base = typename node_hash_set::raw_hash_set;
@@ -451,6 +469,33 @@
 
 namespace container_internal {
 
+// c_for_each_fast(node_hash_set<>, Function)
+//
+// Container-based version of the <algorithm> `std::for_each()` function to
+// apply a function to a container's elements.
+// There are no guarantees on the order of the function calls.
+// Erasing and/or inserting elements inside the function is not allowed.
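+//
+// A brief usage sketch (the `Counter` functor is illustrative only); like
+// `std::for_each()`, the function returns the functor:
+//
+//   struct Counter {
+//     int n = 0;
+//     void operator()(const int&) { ++n; }
+//   };
+//
+//   absl::node_hash_set<int> s = {1, 2, 3};
+//   Counter c = absl::container_internal::c_for_each_fast(s, Counter{});
+//   // c.n == 3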
+template <typename T, typename H, typename E, typename A, typename Function>
+decay_t<Function> c_for_each_fast(const node_hash_set<T, H, E, A>& c,
+                                  Function&& f) {
+  container_internal::ForEach(f, &c);
+  return f;
+}
+template <typename T, typename H, typename E, typename A, typename Function>
+decay_t<Function> c_for_each_fast(node_hash_set<T, H, E, A>& c, Function&& f) {
+  container_internal::ForEach(f, &c);
+  return f;
+}
+template <typename T, typename H, typename E, typename A, typename Function>
+decay_t<Function> c_for_each_fast(node_hash_set<T, H, E, A>&& c, Function&& f) {
+  container_internal::ForEach(f, &c);
+  return f;
+}
+
+}  // namespace container_internal
+
+namespace container_internal {
+
 template <class T>
 struct NodeHashSetPolicy
     : absl::container_internal::node_slot_policy<T&, NodeHashSetPolicy<T>> {
@@ -487,6 +532,11 @@
   }
 
   static size_t element_space_used(const T*) { return sizeof(T); }
+
+  template <class Hash>
+  static constexpr HashSlotFn get_hash_slot_fn() {
+    return &TypeErasedDerefAndApplyToSlotFn<Hash, T>;
+  }
 };
 }  // namespace container_internal
 
diff --git a/absl/container/node_hash_set_test.cc b/absl/container/node_hash_set_test.cc
index 98a8dbd..e616ac1 100644
--- a/absl/container/node_hash_set_test.cc
+++ b/absl/container/node_hash_set_test.cc
@@ -14,10 +14,22 @@
 
 #include "absl/container/node_hash_set.h"
 
+#include <cstddef>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/hash_policy_testing.h"
 #include "absl/container/internal/unordered_set_constructor_test.h"
 #include "absl/container/internal/unordered_set_lookup_test.h"
 #include "absl/container/internal/unordered_set_members_test.h"
 #include "absl/container/internal/unordered_set_modifiers_test.h"
+#include "absl/memory/memory.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -28,6 +40,7 @@
 using ::testing::IsEmpty;
 using ::testing::Pointee;
 using ::testing::UnorderedElementsAre;
+using ::testing::UnorderedElementsAreArray;
 
 using SetTypes = ::testing::Types<
     node_hash_set<int, StatefulTestingHash, StatefulTestingEqual, Alloc<int>>,
@@ -137,6 +150,39 @@
   }
 }
 
+TEST(NodeHashSet, CForEach) {
+  using ValueType = std::pair<int, int>;
+  node_hash_set<ValueType> s;
+  std::vector<ValueType> expected;
+  for (int i = 0; i < 100; ++i) {
+    {
+      SCOPED_TRACE("mutable object iteration");
+      std::vector<ValueType> v;
+      absl::container_internal::c_for_each_fast(
+          s, [&v](const ValueType& p) { v.push_back(p); });
+      ASSERT_THAT(v, UnorderedElementsAreArray(expected));
+    }
+    {
+      SCOPED_TRACE("const object iteration");
+      std::vector<ValueType> v;
+      const node_hash_set<ValueType>& cs = s;
+      absl::container_internal::c_for_each_fast(
+          cs, [&v](const ValueType& p) { v.push_back(p); });
+      ASSERT_THAT(v, UnorderedElementsAreArray(expected));
+    }
+    {
+      SCOPED_TRACE("temporary object iteration");
+      std::vector<ValueType> v;
+      absl::container_internal::c_for_each_fast(
+          node_hash_set<ValueType>(s),
+          [&v](const ValueType& p) { v.push_back(p); });
+      ASSERT_THAT(v, UnorderedElementsAreArray(expected));
+    }
+    s.emplace(i, i);
+    expected.emplace_back(i, i);
+  }
+}
+
 }  // namespace
 }  // namespace container_internal
 ABSL_NAMESPACE_END
diff --git a/absl/container/sample_element_size_test.cc b/absl/container/sample_element_size_test.cc
index b23626b..22470b4 100644
--- a/absl/container/sample_element_size_test.cc
+++ b/absl/container/sample_element_size_test.cc
@@ -12,6 +12,11 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+#include <cstddef>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
 #include "absl/container/flat_hash_map.h"
@@ -38,15 +43,16 @@
     // set cannot be flat_hash_set, however, since that would introduce a mutex
     // deadlock.
     std::unordered_set<const HashtablezInfo*>& preexisting_info,  // NOLINT
-    std::vector<Table>& tables, const typename Table::value_type& elt,
+    std::vector<Table>& tables,
+    const std::vector<typename Table::value_type>& values,
     size_t expected_element_size) {
   for (int i = 0; i < 10; ++i) {
     // We create a new table and must store it somewhere so that when we store
     // a pointer to the resulting `HashtablezInfo` into `preexisting_info`
     // that we aren't storing a dangling pointer.
     tables.emplace_back();
-    // We must insert an element to get a hashtablez to instantiate.
-    tables.back().insert(elt);
+    // We must insert elements to get a hashtablez to instantiate.
+    tables.back().insert(values.begin(), values.end());
   }
   size_t new_count = 0;
   sampler.Iterate([&](const HashtablezInfo& info) {
@@ -82,6 +88,9 @@
   std::vector<flat_hash_set<bigstruct>> flat_set_tables;
   std::vector<node_hash_map<int, bigstruct>> node_map_tables;
   std::vector<node_hash_set<bigstruct>> node_set_tables;
+  std::vector<bigstruct> set_values = {bigstruct{{0}}, bigstruct{{1}}};
+  std::vector<std::pair<const int, bigstruct>> map_values = {{0, bigstruct{}},
+                                                             {1, bigstruct{}}};
 
   // It takes thousands of new tables after changing the sampling parameters
   // before you actually get some instrumentation.  And if you must actually
@@ -97,14 +106,14 @@
   std::unordered_set<const HashtablezInfo*> preexisting_info;  // NOLINT
   sampler.Iterate(
       [&](const HashtablezInfo& info) { preexisting_info.insert(&info); });
-  TestInlineElementSize(sampler, preexisting_info, flat_map_tables,
-                        {0, bigstruct{}}, sizeof(int) + sizeof(bigstruct));
-  TestInlineElementSize(sampler, preexisting_info, node_map_tables,
-                        {0, bigstruct{}}, sizeof(void*));
-  TestInlineElementSize(sampler, preexisting_info, flat_set_tables,  //
-                        bigstruct{}, sizeof(bigstruct));
-  TestInlineElementSize(sampler, preexisting_info, node_set_tables,  //
-                        bigstruct{}, sizeof(void*));
+  TestInlineElementSize(sampler, preexisting_info, flat_map_tables, map_values,
+                        sizeof(int) + sizeof(bigstruct));
+  TestInlineElementSize(sampler, preexisting_info, node_map_tables, map_values,
+                        sizeof(void*));
+  TestInlineElementSize(sampler, preexisting_info, flat_set_tables, set_values,
+                        sizeof(bigstruct));
+  TestInlineElementSize(sampler, preexisting_info, node_set_tables, set_values,
+                        sizeof(void*));
 #endif
 }
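A minimal standalone sketch (not part of this patch) of the size reasoning the updated expectations encode: flat containers keep each element inline in the slot array, while node containers keep only a pointer to a heap-allocated node. The `bigstruct` below is a stand-in for the 256-byte struct used in the test; the equality assumes a common ABI where a char-array member adds no padding.

#include <cstddef>
#include <utility>

struct bigstruct {
  char a[256];
};

// flat_hash_map stores std::pair<const int, bigstruct> directly in its slot
// array, so the sampled inline element size is sizeof(int) + sizeof(bigstruct)
// (holds on common ABIs, since bigstruct only requires char alignment).
static_assert(sizeof(std::pair<const int, bigstruct>) ==
                  sizeof(int) + sizeof(bigstruct),
              "pair payload is stored inline with no extra padding");

// node_hash_map and node_hash_set keep only a pointer in the slot array; the
// element itself lives in a separately allocated node.
static_assert(sizeof(void*) < sizeof(bigstruct),
              "node slots are much smaller than the stored element");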
 
diff --git a/absl/copts/AbseilConfigureCopts.cmake b/absl/copts/AbseilConfigureCopts.cmake
index 3f737c8..a618199 100644
--- a/absl/copts/AbseilConfigureCopts.cmake
+++ b/absl/copts/AbseilConfigureCopts.cmake
@@ -3,7 +3,7 @@
 
 set(ABSL_DEFAULT_LINKOPTS "")
 
-if (BUILD_SHARED_LIBS AND MSVC)
+if (BUILD_SHARED_LIBS AND (MSVC OR ABSL_BUILD_MONOLITHIC_SHARED_LIBS))
   set(ABSL_BUILD_DLL TRUE)
   set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)
 else()
@@ -42,7 +42,7 @@
     string(TOUPPER "${_arch}" _arch_uppercase)
     string(REPLACE "X86_64" "X64" _arch_uppercase ${_arch_uppercase})
     foreach(_flag IN LISTS ABSL_RANDOM_HWAES_${_arch_uppercase}_FLAGS)
-      list(APPEND ABSL_RANDOM_RANDEN_COPTS "-Xarch_${_arch}" "${_flag}")
+      list(APPEND ABSL_RANDOM_RANDEN_COPTS "SHELL:-Xarch_${_arch} ${_flag}")
     endforeach()
   endforeach()
   # If a compiler happens to deal with an argument for a currently unused
diff --git a/absl/copts/GENERATED_AbseilCopts.cmake b/absl/copts/GENERATED_AbseilCopts.cmake
index 430916f..da2282f 100644
--- a/absl/copts/GENERATED_AbseilCopts.cmake
+++ b/absl/copts/GENERATED_AbseilCopts.cmake
@@ -44,6 +44,7 @@
     "-Wconversion-null"
     "-Wformat-security"
     "-Wmissing-declarations"
+    "-Wnon-virtual-dtor"
     "-Woverlength-strings"
     "-Wpointer-arith"
     "-Wundef"
@@ -61,6 +62,7 @@
     "-Wcast-qual"
     "-Wconversion-null"
     "-Wformat-security"
+    "-Wnon-virtual-dtor"
     "-Woverlength-strings"
     "-Wpointer-arith"
     "-Wundef"
@@ -82,8 +84,10 @@
 list(APPEND ABSL_LLVM_FLAGS
     "-Wall"
     "-Wextra"
+    "-Wc++98-compat-extra-semi"
     "-Wcast-qual"
     "-Wconversion"
+    "-Wdeprecated-pragma"
     "-Wfloat-overflow-conversion"
     "-Wfloat-zero-conversion"
     "-Wfor-loop-analysis"
@@ -120,8 +124,10 @@
 list(APPEND ABSL_LLVM_TEST_FLAGS
     "-Wall"
     "-Wextra"
+    "-Wc++98-compat-extra-semi"
     "-Wcast-qual"
     "-Wconversion"
+    "-Wdeprecated-pragma"
     "-Wfloat-overflow-conversion"
     "-Wfloat-zero-conversion"
     "-Wfor-loop-analysis"
diff --git a/absl/copts/GENERATED_copts.bzl b/absl/copts/GENERATED_copts.bzl
index 011d8a9..b9e0071 100644
--- a/absl/copts/GENERATED_copts.bzl
+++ b/absl/copts/GENERATED_copts.bzl
@@ -45,6 +45,7 @@
     "-Wconversion-null",
     "-Wformat-security",
     "-Wmissing-declarations",
+    "-Wnon-virtual-dtor",
     "-Woverlength-strings",
     "-Wpointer-arith",
     "-Wundef",
@@ -62,6 +63,7 @@
     "-Wcast-qual",
     "-Wconversion-null",
     "-Wformat-security",
+    "-Wnon-virtual-dtor",
     "-Woverlength-strings",
     "-Wpointer-arith",
     "-Wundef",
@@ -83,8 +85,10 @@
 ABSL_LLVM_FLAGS = [
     "-Wall",
     "-Wextra",
+    "-Wc++98-compat-extra-semi",
     "-Wcast-qual",
     "-Wconversion",
+    "-Wdeprecated-pragma",
     "-Wfloat-overflow-conversion",
     "-Wfloat-zero-conversion",
     "-Wfor-loop-analysis",
@@ -121,8 +125,10 @@
 ABSL_LLVM_TEST_FLAGS = [
     "-Wall",
     "-Wextra",
+    "-Wc++98-compat-extra-semi",
     "-Wcast-qual",
     "-Wconversion",
+    "-Wdeprecated-pragma",
     "-Wfloat-overflow-conversion",
     "-Wfloat-zero-conversion",
     "-Wfor-loop-analysis",
diff --git a/absl/copts/copts.py b/absl/copts/copts.py
index e6e1194..2d85ac7 100644
--- a/absl/copts/copts.py
+++ b/absl/copts/copts.py
@@ -18,6 +18,7 @@
     "-Wconversion-null",
     "-Wformat-security",
     "-Wmissing-declarations",
+    "-Wnon-virtual-dtor",
     "-Woverlength-strings",
     "-Wpointer-arith",
     "-Wundef",
@@ -43,8 +44,10 @@
 ABSL_LLVM_FLAGS = [
     "-Wall",
     "-Wextra",
+    "-Wc++98-compat-extra-semi",
     "-Wcast-qual",
     "-Wconversion",
+    "-Wdeprecated-pragma",
     "-Wfloat-overflow-conversion",
     "-Wfloat-zero-conversion",
     "-Wfor-loop-analysis",
diff --git a/absl/crc/BUILD.bazel b/absl/crc/BUILD.bazel
index f44c3f6..890d637 100644
--- a/absl/crc/BUILD.bazel
+++ b/absl/crc/BUILD.bazel
@@ -121,7 +121,9 @@
     hdrs = ["internal/non_temporal_arm_intrinsics.h"],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    visibility = ["//visibility:private"],
+    visibility = [
+        ":__pkg__",
+    ],
     deps = [
         "//absl/base:config",
     ],
@@ -132,7 +134,9 @@
     hdrs = ["internal/non_temporal_memcpy.h"],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    visibility = ["//visibility:private"],
+    visibility = [
+        ":__pkg__",
+    ],
     deps = [
         ":non_temporal_arm_intrinsics",
         "//absl/base:config",
@@ -182,8 +186,8 @@
     deps = [
         ":crc32c",
         "//absl/base:config",
+        "//absl/base:no_destructor",
         "//absl/numeric:bits",
-        "//absl/strings",
     ],
 )
 
@@ -203,7 +207,7 @@
 
 cc_binary(
     name = "crc32c_benchmark",
-    testonly = 1,
+    testonly = True,
     srcs = ["crc32c_benchmark.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
diff --git a/absl/crc/CMakeLists.txt b/absl/crc/CMakeLists.txt
index ec7b451..d52a1bc 100644
--- a/absl/crc/CMakeLists.txt
+++ b/absl/crc/CMakeLists.txt
@@ -159,6 +159,7 @@
     absl::crc32c
     absl::config
     absl::strings
+    absl::no_destructor
 )
 
 absl_cc_test(
diff --git a/absl/crc/internal/crc32_x86_arm_combined_simd.h b/absl/crc/internal/crc32_x86_arm_combined_simd.h
index 59995ae..0f6e347 100644
--- a/absl/crc/internal/crc32_x86_arm_combined_simd.h
+++ b/absl/crc/internal/crc32_x86_arm_combined_simd.h
@@ -33,14 +33,15 @@
 #include <x86intrin.h>
 #define ABSL_CRC_INTERNAL_HAVE_X86_SIMD
 
-#elif defined(_MSC_VER) && !defined(__clang__) && defined(__AVX__)
+#elif defined(_MSC_VER) && !defined(__clang__) && defined(__AVX__) && \
+    defined(_M_AMD64)
 
 // MSVC AVX (/arch:AVX) implies SSE 4.2 and PCLMULQDQ.
 #include <intrin.h>
 #define ABSL_CRC_INTERNAL_HAVE_X86_SIMD
 
-#elif defined(__aarch64__) && defined(__LITTLE_ENDIAN__) && \
-    defined(__ARM_FEATURE_CRC32) && defined(ABSL_INTERNAL_HAVE_ARM_NEON) &&  \
+#elif defined(__aarch64__) && defined(__LITTLE_ENDIAN__) &&                 \
+    defined(__ARM_FEATURE_CRC32) && defined(ABSL_INTERNAL_HAVE_ARM_NEON) && \
     defined(__ARM_FEATURE_CRYPTO)
 
 #include <arm_acle.h>
@@ -101,10 +102,11 @@
 // Produces an AND operation of |l| and |r|.
 V128 V128_And(const V128 l, const V128 r);
 
-// Sets two 64 bit integers to one 128 bit vector. The order is reverse.
+// Sets the lower half of a 128 bit register to the given 64-bit value and
+// zeroes the upper half.
 // dst[63:0] := |r|
-// dst[127:64] := |l|
-V128 V128_From2x64(const uint64_t l, const uint64_t r);
+// dst[127:64] := |0|
+V128 V128_From64WithZeroFill(const uint64_t r);
 
 // Shift |l| right by |imm| bytes while shifting in zeros.
 template <int imm>
@@ -121,8 +123,8 @@
 // Extracts the low 64 bits from V128.
 int64_t V128_Low64(const V128 l);
 
-// Left-shifts packed 64-bit integers in l by r.
-V128 V128_ShiftLeft64(const V128 l, const V128 r);
+// Add packed 64-bit integers in |l| and |r|.
+V128 V128_Add64(const V128 l, const V128 r);
 
 #endif
 
@@ -170,8 +172,8 @@
 
 inline V128 V128_And(const V128 l, const V128 r) { return _mm_and_si128(l, r); }
 
-inline V128 V128_From2x64(const uint64_t l, const uint64_t r) {
-  return _mm_set_epi64x(static_cast<int64_t>(l), static_cast<int64_t>(r));
+inline V128 V128_From64WithZeroFill(const uint64_t r) {
+  return _mm_set_epi64x(static_cast<int64_t>(0), static_cast<int64_t>(r));
 }
 
 template <int imm>
@@ -191,8 +193,8 @@
 
 inline int64_t V128_Low64(const V128 l) { return _mm_cvtsi128_si64(l); }
 
-inline V128 V128_ShiftLeft64(const V128 l, const V128 r) {
-  return _mm_sll_epi64(l, r);
+inline V128 V128_Add64(const V128 l, const V128 r) {
+  return _mm_add_epi64(l, r);
 }
 
 #elif defined(ABSL_CRC_INTERNAL_HAVE_ARM_SIMD)
@@ -261,10 +263,12 @@
 
 inline V128 V128_And(const V128 l, const V128 r) { return vandq_u64(l, r); }
 
-inline V128 V128_From2x64(const uint64_t l, const uint64_t r) {
-  return vcombine_u64(vcreate_u64(r), vcreate_u64(l));
+inline V128 V128_From64WithZeroFill(const uint64_t r) {
+  constexpr uint64x2_t kZero = {0, 0};
+  return vsetq_lane_u64(r, kZero, 0);
 }
 
+
 template <int imm>
 inline V128 V128_ShiftRight(const V128 l) {
   return vreinterpretq_u64_s8(
@@ -285,9 +289,7 @@
   return vgetq_lane_s64(vreinterpretq_s64_u64(l), 0);
 }
 
-inline V128 V128_ShiftLeft64(const V128 l, const V128 r) {
-  return vshlq_u64(l, vreinterpretq_s64_u64(r));
-}
+inline V128 V128_Add64(const V128 l, const V128 r) { return vaddq_u64(l, r); }
 
 #endif
 
diff --git a/absl/crc/internal/crc_cord_state.cc b/absl/crc/internal/crc_cord_state.cc
index 28d04dc..303a555 100644
--- a/absl/crc/internal/crc_cord_state.cc
+++ b/absl/crc/internal/crc_cord_state.cc
@@ -17,6 +17,7 @@
 #include <cassert>
 
 #include "absl/base/config.h"
+#include "absl/base/no_destructor.h"
 #include "absl/numeric/bits.h"
 
 namespace absl {
@@ -24,14 +25,14 @@
 namespace crc_internal {
 
 CrcCordState::RefcountedRep* CrcCordState::RefSharedEmptyRep() {
-  static CrcCordState::RefcountedRep* empty = new CrcCordState::RefcountedRep;
+  static absl::NoDestructor<CrcCordState::RefcountedRep> empty;
 
   assert(empty->count.load(std::memory_order_relaxed) >= 1);
   assert(empty->rep.removed_prefix.length == 0);
   assert(empty->rep.prefix_crc.empty());
 
-  Ref(empty);
-  return empty;
+  Ref(empty.get());
+  return empty.get();
 }
 
 CrcCordState::CrcCordState() : refcounted_rep_(new RefcountedRep) {}
diff --git a/absl/crc/internal/crc_memcpy_fallback.cc b/absl/crc/internal/crc_memcpy_fallback.cc
index 0779550..e34249f 100644
--- a/absl/crc/internal/crc_memcpy_fallback.cc
+++ b/absl/crc/internal/crc_memcpy_fallback.cc
@@ -12,12 +12,13 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include <cstdint>
+#include <cstring>
 #include <memory>
 
 #include "absl/base/config.h"
 #include "absl/crc/crc32c.h"
 #include "absl/crc/internal/crc_memcpy.h"
+#include "absl/strings/string_view.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
diff --git a/absl/crc/internal/crc_memcpy_x86_arm_combined.cc b/absl/crc/internal/crc_memcpy_x86_arm_combined.cc
index 968e9ae..38f61e9 100644
--- a/absl/crc/internal/crc_memcpy_x86_arm_combined.cc
+++ b/absl/crc/internal/crc_memcpy_x86_arm_combined.cc
@@ -52,6 +52,7 @@
 #include <cstring>
 #include <memory>
 
+#include "absl/base/attributes.h"
 #include "absl/base/config.h"
 #include "absl/base/optimization.h"
 #include "absl/base/prefetch.h"
@@ -88,9 +89,11 @@
 constexpr size_t kIntLoadsPerVec = sizeof(V128) / sizeof(uint64_t);
 
 // Common function for copying the tails of multiple large regions.
+// Disable ubsan for benign unaligned access. See b/254108538.
 template <size_t vec_regions, size_t int_regions>
-inline void LargeTailCopy(crc32c_t* crcs, char** dst, const char** src,
-                          size_t region_size, size_t copy_rounds) {
+ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED inline void LargeTailCopy(
+    crc32c_t* crcs, char** dst, const char** src, size_t region_size,
+    size_t copy_rounds) {
   std::array<V128, vec_regions> data;
   std::array<uint64_t, kIntLoadsPerVec * int_regions> int_data;
 
@@ -127,8 +130,8 @@
         size_t data_index = i * kIntLoadsPerVec + j;
 
         int_data[data_index] = *(usrc + j);
-        crcs[region] = crc32c_t{static_cast<uint32_t>(CRC32_u64(
-            static_cast<uint32_t>(crcs[region]), int_data[data_index]))};
+        crcs[region] = crc32c_t{CRC32_u64(static_cast<uint32_t>(crcs[region]),
+                                          int_data[data_index])};
 
         *(udst + j) = int_data[data_index];
       }
@@ -155,8 +158,10 @@
                    std::size_t length, crc32c_t initial_crc) const override;
 };
 
+// Disable ubsan for benign unaligned access. See b/254108538.
 template <size_t vec_regions, size_t int_regions>
-crc32c_t AcceleratedCrcMemcpyEngine<vec_regions, int_regions>::Compute(
+ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED crc32c_t
+AcceleratedCrcMemcpyEngine<vec_regions, int_regions>::Compute(
     void* __restrict dst, const void* __restrict src, std::size_t length,
     crc32c_t initial_crc) const {
   constexpr std::size_t kRegions = vec_regions + int_regions;
@@ -196,7 +201,6 @@
 
   // Start work on the CRC: undo the XOR from the previous calculation or set up
   // the initial value of the CRC.
-  // initial_crc ^= kCrcDataXor;
   initial_crc = crc32c_t{static_cast<uint32_t>(initial_crc) ^ kCrcDataXor};
 
   // Do an initial alignment copy, so we can use aligned store instructions to
@@ -295,8 +299,8 @@
 
           // Load and CRC the data.
           int_data[data_index] = *(usrc + i * kIntLoadsPerVec + k);
-          crcs[region] = crc32c_t{static_cast<uint32_t>(CRC32_u64(
-              static_cast<uint32_t>(crcs[region]), int_data[data_index]))};
+          crcs[region] = crc32c_t{CRC32_u64(static_cast<uint32_t>(crcs[region]),
+                                            int_data[data_index])};
 
           // Store the data.
           *(udst + i * kIntLoadsPerVec + k) = int_data[data_index];
diff --git a/absl/crc/internal/crc_non_temporal_memcpy.cc b/absl/crc/internal/crc_non_temporal_memcpy.cc
index adc867f..a56f1eb 100644
--- a/absl/crc/internal/crc_non_temporal_memcpy.cc
+++ b/absl/crc/internal/crc_non_temporal_memcpy.cc
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include <cstdint>
+#include <cstddef>
 
 #include "absl/base/config.h"
 #include "absl/crc/crc32c.h"
diff --git a/absl/crc/internal/crc_x86_arm_combined.cc b/absl/crc/internal/crc_x86_arm_combined.cc
index 51eff4e..79dace3 100644
--- a/absl/crc/internal/crc_x86_arm_combined.cc
+++ b/absl/crc/internal/crc_x86_arm_combined.cc
@@ -101,13 +101,17 @@
 namespace {
 
 uint32_t multiply(uint32_t a, uint32_t b) {
-  V128 shifts = V128_From2x64(0, 1);
-  V128 power = V128_From2x64(0, a);
-  V128 crc = V128_From2x64(0, b);
+  V128 power = V128_From64WithZeroFill(a);
+  V128 crc = V128_From64WithZeroFill(b);
   V128 res = V128_PMulLow(power, crc);
 
-  // Combine crc values
-  res = V128_ShiftLeft64(res, shifts);
+  // Combine crc values.
+  //
+  // Adding res to itself is equivalent to multiplying by 2,
+  // or shifting left by 1. Addition is used as not all compilers
+  // are able to generate optimal code without this hint.
+  // https://godbolt.org/z/rr3fMnf39
+  res = V128_Add64(res, res);
   return static_cast<uint32_t>(V128_Extract32<1>(res)) ^
          CRC32_u32(0, static_cast<uint32_t>(V128_Low64(res)));
 }
@@ -444,11 +448,11 @@
 
         V128 magic = *(reinterpret_cast<const V128*>(kClmulConstants) + bs - 1);
 
-        V128 tmp = V128_From2x64(0, l64);
+        V128 tmp = V128_From64WithZeroFill(l64);
 
         V128 res1 = V128_PMulLow(tmp, magic);
 
-        tmp = V128_From2x64(0, l641);
+        tmp = V128_From64WithZeroFill(l641);
 
         V128 res2 = V128_PMul10(tmp, magic);
         V128 x = V128_Xor(res1, res2);
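A small standalone check (not part of this patch) of the identity the new `multiply` relies on: adding a 64-bit lane to itself equals shifting it left by one, including when the doubling wraps, so `V128_Add64(res, res)` produces the same lanes as the removed shift-by-one.

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t samples[] = {0u, 1u, 0x8000000000000000u,
                              0xdeadbeefcafef00du};
  for (uint64_t x : samples) {
    // Both sides are computed modulo 2^64, so they agree even when the top
    // bit is set and the doubling wraps around.
    assert(x + x == (x << 1));
  }
  return 0;
}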
diff --git a/absl/crc/internal/non_temporal_memcpy.h b/absl/crc/internal/non_temporal_memcpy.h
index b3d94ba..7ae83bd 100644
--- a/absl/crc/internal/non_temporal_memcpy.h
+++ b/absl/crc/internal/non_temporal_memcpy.h
@@ -19,19 +19,8 @@
 #include <intrin.h>
 #endif
 
-#ifdef __SSE__
-#include <xmmintrin.h>
-#endif
-
-#ifdef __SSE2__
-#include <emmintrin.h>
-#endif
-
-#ifdef __SSE3__
-#include <pmmintrin.h>
-#endif
-
-#ifdef __AVX__
+#if defined(__SSE__) || defined(__AVX__)
+// Pulls in both SSE and AVX intrinsics.
 #include <immintrin.h>
 #endif
 
@@ -44,6 +33,7 @@
 #include <cstdint>
 #include <cstring>
 
+#include "absl/base/attributes.h"
 #include "absl/base/config.h"
 #include "absl/base/optimization.h"
 
@@ -57,7 +47,9 @@
 // memcpy can save 1 DRAM load of the destination cacheline.
 constexpr size_t kCacheLineSize = ABSL_CACHELINE_SIZE;
 
-// If the objects overlap, the behavior is undefined.
+// If the objects overlap, the behavior is undefined. Uses regular memcpy
+// instead of non-temporal memcpy if the required CPU intrinsics are unavailable
+// at compile time.
 inline void *non_temporal_store_memcpy(void *__restrict dst,
                                        const void *__restrict src, size_t len) {
 #if defined(__SSE3__) || defined(__aarch64__) || \
@@ -119,10 +111,20 @@
 #endif  // __SSE3__ || __aarch64__ || (_MSC_VER && __AVX__)
 }
 
+// If the objects overlap, the behavior is undefined. Uses regular memcpy
+// instead of non-temporal memcpy if the required CPU intrinsics are unavailable
+// at compile time.
+#if ABSL_HAVE_CPP_ATTRIBUTE(gnu::target) && \
+    (defined(__x86_64__) || defined(__i386__))
+[[gnu::target("avx")]]
+#endif
 inline void *non_temporal_store_memcpy_avx(void *__restrict dst,
                                            const void *__restrict src,
                                            size_t len) {
-#ifdef __AVX__
+  // This function requires AVX. For clang and gcc we compile it with AVX even
+  // if the translation unit isn't built with AVX support. This works because we
+  // only select this implementation at runtime if the CPU supports AVX.
+#if defined(__SSE3__) || (defined(_MSC_VER) && defined(__AVX__))
   uint8_t *d = reinterpret_cast<uint8_t *>(dst);
   const uint8_t *s = reinterpret_cast<const uint8_t *>(src);
 
@@ -168,9 +170,8 @@
   }
   return dst;
 #else
-  // Fallback to regular memcpy when AVX is not available.
   return memcpy(dst, src, len);
-#endif  // __AVX__
+#endif  // __SSE3__ || (_MSC_VER && __AVX__)
 }
 
 }  // namespace crc_internal
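A minimal sketch (not Abseil code) of the dispatch pattern the `[[gnu::target("avx")]]` annotation above enables: one function is compiled with AVX code generation even though the rest of the translation unit is not, and callers reach it only after a runtime CPU-feature check. `__builtin_cpu_supports` is the GCC/Clang x86 builtin assumed here; `CopyWithAvx` and `DispatchingCopy` are illustrative names.

#include <cstddef>
#include <cstring>

#if (defined(__x86_64__) || defined(__i386__)) && \
    (defined(__GNUC__) || defined(__clang__))

[[gnu::target("avx")]]
void* CopyWithAvx(void* dst, const void* src, std::size_t len) {
  // Because of the target attribute, the compiler may emit 256-bit AVX
  // instructions here regardless of the flags used for the rest of the
  // translation unit.
  return std::memcpy(dst, src, len);
}

void* DispatchingCopy(void* dst, const void* src, std::size_t len) {
  // Only call the AVX-compiled function when the CPU actually supports AVX;
  // otherwise fall back to plain memcpy.
  if (__builtin_cpu_supports("avx")) return CopyWithAvx(dst, src, len);
  return std::memcpy(dst, src, len);
}

#endif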
diff --git a/absl/debugging/BUILD.bazel b/absl/debugging/BUILD.bazel
index 5baff7a..52b407c 100644
--- a/absl/debugging/BUILD.bazel
+++ b/absl/debugging/BUILD.bazel
@@ -228,9 +228,12 @@
         "//absl/debugging:__pkg__",
     ],
     deps = [
+        ":demangle_rust",
         "//absl/base",
         "//absl/base:config",
         "//absl/base:core_headers",
+        "//absl/base:nullability",
+        "//absl/numeric:bits",
     ],
 )
 
@@ -252,6 +255,106 @@
 )
 
 cc_library(
+    name = "bounded_utf8_length_sequence",
+    hdrs = ["internal/bounded_utf8_length_sequence.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        "//absl/base:config",
+        "//absl/numeric:bits",
+    ],
+)
+
+cc_test(
+    name = "bounded_utf8_length_sequence_test",
+    srcs = ["internal/bounded_utf8_length_sequence_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":bounded_utf8_length_sequence",
+        "//absl/base:config",
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_library(
+    name = "decode_rust_punycode",
+    srcs = ["internal/decode_rust_punycode.cc"],
+    hdrs = ["internal/decode_rust_punycode.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":bounded_utf8_length_sequence",
+        ":utf8_for_code_point",
+        "//absl/base:config",
+        "//absl/base:nullability",
+    ],
+)
+
+cc_test(
+    name = "decode_rust_punycode_test",
+    srcs = ["internal/decode_rust_punycode_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":decode_rust_punycode",
+        "//absl/base:config",
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_library(
+    name = "demangle_rust",
+    srcs = ["internal/demangle_rust.cc"],
+    hdrs = ["internal/demangle_rust.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":decode_rust_punycode",
+        "//absl/base:config",
+        "//absl/base:core_headers",
+    ],
+)
+
+cc_test(
+    name = "demangle_rust_test",
+    srcs = ["internal/demangle_rust_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":demangle_rust",
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_library(
+    name = "utf8_for_code_point",
+    srcs = ["internal/utf8_for_code_point.cc"],
+    hdrs = ["internal/utf8_for_code_point.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = ["//absl/base:config"],
+)
+
+cc_test(
+    name = "utf8_for_code_point_test",
+    srcs = ["internal/utf8_for_code_point_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":utf8_for_code_point",
+        "//absl/base:config",
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_library(
     name = "leak_check",
     srcs = ["leak_check.cc"],
     hdrs = ["leak_check.h"],
diff --git a/absl/debugging/CMakeLists.txt b/absl/debugging/CMakeLists.txt
index 65e2af8..a96b4f3 100644
--- a/absl/debugging/CMakeLists.txt
+++ b/absl/debugging/CMakeLists.txt
@@ -201,8 +201,8 @@
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
-    absl::base
-    absl::core_headers
+    absl::config
+    absl::demangle_rust
   PUBLIC
 )
 
@@ -223,6 +223,118 @@
     GTest::gmock_main
 )
 
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    bounded_utf8_length_sequence
+  HDRS
+    "internal/bounded_utf8_length_sequence.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::bits
+    absl::config
+)
+
+absl_cc_test(
+  NAME
+    bounded_utf8_length_sequence_test
+  SRCS
+    "internal/bounded_utf8_length_sequence_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::bounded_utf8_length_sequence
+    absl::config
+    GTest::gmock_main
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    decode_rust_punycode
+  HDRS
+    "internal/decode_rust_punycode.h"
+  SRCS
+    "internal/decode_rust_punycode.cc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::bounded_utf8_length_sequence
+    absl::config
+    absl::nullability
+    absl::utf8_for_code_point
+)
+
+absl_cc_test(
+  NAME
+    decode_rust_punycode_test
+  SRCS
+    "internal/decode_rust_punycode_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::decode_rust_punycode
+    absl::config
+    GTest::gmock_main
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    demangle_rust
+  HDRS
+    "internal/demangle_rust.h"
+  SRCS
+    "internal/demangle_rust.cc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::config
+    absl::core_headers
+    absl::decode_rust_punycode
+)
+
+absl_cc_test(
+  NAME
+    demangle_rust_test
+  SRCS
+    "internal/demangle_rust_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::demangle_rust
+    absl::config
+    GTest::gmock_main
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    utf8_for_code_point
+  HDRS
+    "internal/utf8_for_code_point.h"
+  SRCS
+    "internal/utf8_for_code_point.cc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::config
+)
+
+absl_cc_test(
+  NAME
+    utf8_for_code_point_test
+  SRCS
+    "internal/utf8_for_code_point_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::utf8_for_code_point
+    absl::config
+    GTest::gmock_main
+)
+
 absl_cc_library(
   NAME
     leak_check
diff --git a/absl/debugging/failure_signal_handler.h b/absl/debugging/failure_signal_handler.h
index 5e03478..4117fac 100644
--- a/absl/debugging/failure_signal_handler.h
+++ b/absl/debugging/failure_signal_handler.h
@@ -33,7 +33,7 @@
 // }
 //
 // Any program that raises a fatal signal (such as `SIGSEGV`, `SIGILL`,
-// `SIGFPE`, `SIGABRT`, `SIGTERM`, `SIGBUG`, and `SIGTRAP`) will call the
+// `SIGFPE`, `SIGABRT`, `SIGTERM`, `SIGBUS`, and `SIGTRAP`) will call the
 // installed failure signal handler and provide debugging information to stderr.
 //
 // Note that you should *not* install the Abseil failure signal handler more
diff --git a/absl/debugging/internal/bounded_utf8_length_sequence.h b/absl/debugging/internal/bounded_utf8_length_sequence.h
new file mode 100644
index 0000000..188e06c
--- /dev/null
+++ b/absl/debugging/internal/bounded_utf8_length_sequence.h
@@ -0,0 +1,126 @@
+// Copyright 2024 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_DEBUGGING_INTERNAL_BOUNDED_UTF8_LENGTH_SEQUENCE_H_
+#define ABSL_DEBUGGING_INTERNAL_BOUNDED_UTF8_LENGTH_SEQUENCE_H_
+
+#include <cstdint>
+
+#include "absl/base/config.h"
+#include "absl/numeric/bits.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+
+// A sequence of up to max_elements integers between 1 and 4 inclusive, whose
+// insertion operation computes the sum of all the elements before the insertion
+// point.  This is useful in decoding Punycode, where one needs to know where in
+// a UTF-8 byte stream the n-th code point begins.
+//
+// BoundedUtf8LengthSequence is async-signal-safe and suitable for use in
+// symbolizing stack traces in a signal handler, provided max_elements is not
+// improvidently large.  For inputs of lengths accepted by the Rust demangler,
+// up to a couple hundred code points, InsertAndReturnSumOfPredecessors should
+// run in a few dozen clock cycles, on par with the other arithmetic required
+// for Punycode decoding.
+template <uint32_t max_elements>
+class BoundedUtf8LengthSequence {
+ public:
+  // Constructs an empty sequence.
+  BoundedUtf8LengthSequence() = default;
+
+  // Inserts `utf8_length` at position `index`, shifting any existing elements
+  // at or beyond `index` one position to the right.  If the sequence is
+  // already full, the rightmost element is discarded.
+  //
+  // Returns the sum of the elements at positions 0 to `index - 1` inclusive.
+  // If `index` is greater than the number of elements already inserted, the
+  // excess positions in the range count 1 apiece.
+  //
+  // REQUIRES: index < max_elements and 1 <= utf8_length <= 4.
+  uint32_t InsertAndReturnSumOfPredecessors(
+      uint32_t index, uint32_t utf8_length) {
+    // The caller shouldn't pass out-of-bounds inputs, but if it does happen,
+    // clamp the values and try to continue.  If we're being called from a
+    // signal handler, the last thing we want to do is crash.  Emitting
+    // malformed UTF-8 is a lesser evil.
+    if (index >= max_elements) index = max_elements - 1;
+    if (utf8_length == 0 || utf8_length > 4) utf8_length = 1;
+
+    const uint32_t word_index = index/32;
+    const uint32_t bit_index = 2 * (index % 32);
+    const uint64_t ones_bit = uint64_t{1} << bit_index;
+
+    // Compute the sum of predecessors.
+    //   - Each value from 1 to 4 is represented by a bit field with value from
+    //     0 to 3, so the desired sum is index plus the sum of the
+    //     representations actually stored.
+    //   - For each bit field, a set low bit should contribute 1 to the sum, and
+    //     a set high bit should contribute 2.
+    //   - Another way to say the same thing is that each set bit contributes 1,
+    //     and each set high bit contributes an additional 1.
+    //   - So the sum we want is index + popcount(everything) + popcount(bits in
+    //     odd positions).
+    const uint64_t odd_bits_mask = 0xaaaaaaaaaaaaaaaa;
+    const uint64_t lower_seminibbles_mask = ones_bit - 1;
+    const uint64_t higher_seminibbles_mask = ~lower_seminibbles_mask;
+    const uint64_t same_word_bits_below_insertion =
+        rep_[word_index] & lower_seminibbles_mask;
+    int full_popcount = absl::popcount(same_word_bits_below_insertion);
+    int odd_popcount =
+        absl::popcount(same_word_bits_below_insertion & odd_bits_mask);
+    for (uint32_t j = word_index; j > 0; --j) {
+      const uint64_t word_below_insertion = rep_[j - 1];
+      full_popcount += absl::popcount(word_below_insertion);
+      odd_popcount += absl::popcount(word_below_insertion & odd_bits_mask);
+    }
+    const uint32_t sum_of_predecessors =
+        index + static_cast<uint32_t>(full_popcount + odd_popcount);
+
+    // Now insert utf8_length's representation, shifting successors up one
+    // place.
+    for (uint32_t j = max_elements/32 - 1; j > word_index; --j) {
+      rep_[j] = (rep_[j] << 2) | (rep_[j - 1] >> 62);
+    }
+    rep_[word_index] =
+        (rep_[word_index] & lower_seminibbles_mask) |
+        (uint64_t{utf8_length - 1} << bit_index) |
+        ((rep_[word_index] & higher_seminibbles_mask) << 2);
+
+    return sum_of_predecessors;
+  }
+
+ private:
+  // If the (32 * i + j)-th element of the represented sequence has the value k
+  // (0 <= j < 32, 1 <= k <= 4), then bits 2 * j and 2 * j + 1 of rep_[i]
+  // contain the seminibble (k - 1).
+  //
+  // In particular, the zero-initialization of rep_ makes positions not holding
+  // any inserted element count as 1 in InsertAndReturnSumOfPredecessors.
+  //
+  // Example: rep_ = {0xb1, ... the rest zeroes ...} represents the sequence
+  // (2, 1, 4, 3, ... the rest 1's ...).  Constructing the sequence of Unicode
+  // code points "Àa🂻中" = {U+00C0, U+0061, U+1F0BB, U+4E2D} (among many
+  // other examples) would yield this value of rep_.
+  static_assert(max_elements > 0 && max_elements % 32 == 0,
+                "max_elements must be a positive multiple of 32");
+  uint64_t rep_[max_elements/32] = {};
+};
+
+}  // namespace debugging_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_DEBUGGING_INTERNAL_BOUNDED_UTF8_LENGTH_SEQUENCE_H_
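A minimal usage sketch (not part of this patch) of the header above, tracing the prefix sums for the UTF-8 lengths 2, 1, 4, 3 from the `rep_` example in the comments:

#include <cstdint>

#include "absl/debugging/internal/bounded_utf8_length_sequence.h"

// Appends the lengths 2, 1, 4, 3 and then inserts in the middle; the return
// value is always the number of UTF-8 bytes occupied by the code points to
// the left of the insertion index.
inline void BoundedUtf8LengthSequenceExample() {
  absl::debugging_internal::BoundedUtf8LengthSequence<32> seq;
  uint32_t r;
  r = seq.InsertAndReturnSumOfPredecessors(0, 2);  // r == 0, sequence: 2
  r = seq.InsertAndReturnSumOfPredecessors(1, 1);  // r == 2, sequence: 2 1
  r = seq.InsertAndReturnSumOfPredecessors(2, 4);  // r == 3, sequence: 2 1 4
  r = seq.InsertAndReturnSumOfPredecessors(3, 3);  // r == 7, sequence: 2 1 4 3
  // Inserting at index 2 reports the byte offset where the third code point's
  // UTF-8 encoding would be spliced in: 2 + 1 = 3 bytes.
  r = seq.InsertAndReturnSumOfPredecessors(2, 1);  // r == 3
  (void)r;
}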
diff --git a/absl/debugging/internal/bounded_utf8_length_sequence_test.cc b/absl/debugging/internal/bounded_utf8_length_sequence_test.cc
new file mode 100644
index 0000000..17a24fd
--- /dev/null
+++ b/absl/debugging/internal/bounded_utf8_length_sequence_test.cc
@@ -0,0 +1,126 @@
+// Copyright 2024 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/debugging/internal/bounded_utf8_length_sequence.h"
+
+#include <cstdint>
+
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+namespace {
+
+TEST(BoundedUtf8LengthSequenceTest, RemembersAValueOfOneCorrectly) {
+  BoundedUtf8LengthSequence<32> seq;
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(0, 1), 0);
+  EXPECT_EQ(seq.InsertAndReturnSumOfPredecessors(1, 1), 1);
+}
+
+TEST(BoundedUtf8LengthSequenceTest, RemembersAValueOfTwoCorrectly) {
+  BoundedUtf8LengthSequence<32> seq;
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(0, 2), 0);
+  EXPECT_EQ(seq.InsertAndReturnSumOfPredecessors(1, 1), 2);
+}
+
+TEST(BoundedUtf8LengthSequenceTest, RemembersAValueOfThreeCorrectly) {
+  BoundedUtf8LengthSequence<32> seq;
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(0, 3), 0);
+  EXPECT_EQ(seq.InsertAndReturnSumOfPredecessors(1, 1), 3);
+}
+
+TEST(BoundedUtf8LengthSequenceTest, RemembersAValueOfFourCorrectly) {
+  BoundedUtf8LengthSequence<32> seq;
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(0, 4), 0);
+  EXPECT_EQ(seq.InsertAndReturnSumOfPredecessors(1, 1), 4);
+}
+
+TEST(BoundedUtf8LengthSequenceTest, RemembersSeveralAppendedValues) {
+  BoundedUtf8LengthSequence<32> seq;
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(0, 1), 0);
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(1, 4), 1);
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(2, 2), 5);
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(3, 3), 7);
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(4, 1), 10);
+}
+
+TEST(BoundedUtf8LengthSequenceTest, RemembersSeveralPrependedValues) {
+  BoundedUtf8LengthSequence<32> seq;
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(0, 4), 0);
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(0, 3), 0);
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(0, 2), 0);
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(0, 1), 0);
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(4, 1), 10);
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(3, 1), 6);
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(2, 1), 3);
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(1, 1), 1);
+}
+
+TEST(BoundedUtf8LengthSequenceTest, RepeatedInsertsShiftValuesOutTheRightEnd) {
+  BoundedUtf8LengthSequence<32> seq;
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(0, 2), 0);
+  for (uint32_t i = 1; i < 31; ++i) {
+    ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(0, 1), 0)
+        << "while moving the 2 into position " << i;
+    ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(31, 1), 32)
+        << "after moving the 2 into position " << i;
+  }
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(0, 1), 0)
+      << "while moving the 2 into position 31";
+  EXPECT_EQ(seq.InsertAndReturnSumOfPredecessors(31, 1), 31)
+      << "after moving the 2 into position 31";
+}
+
+TEST(BoundedUtf8LengthSequenceTest, InsertsIntoWord1LeaveWord0Untouched) {
+  BoundedUtf8LengthSequence<64> seq;
+  for (uint32_t i = 0; i < 32; ++i) {
+    ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(i, 2), 2 * i)
+        << "at index " << i;
+  }
+  EXPECT_EQ(seq.InsertAndReturnSumOfPredecessors(32, 1), 64);
+  EXPECT_EQ(seq.InsertAndReturnSumOfPredecessors(32, 1), 64);
+}
+
+TEST(BoundedUtf8LengthSequenceTest, InsertsIntoWord0ShiftValuesIntoWord1) {
+  BoundedUtf8LengthSequence<64> seq;
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(29, 2), 29);
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(30, 3), 31);
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(31, 4), 34);
+
+  // Pushing two 1's on the front moves the 3 and 4 into the high word.
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(0, 1), 0);
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(0, 1), 0);
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(34, 1), 31 + 2 + 3 + 4);
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(32, 1), 31 + 2);
+}
+
+TEST(BoundedUtf8LengthSequenceTest, ValuesAreShiftedCorrectlyAmongThreeWords) {
+  BoundedUtf8LengthSequence<96> seq;
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(31, 3), 31);
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(63, 4), 62 + 3);
+
+  // This insertion moves both the 3 and the 4 up a word.
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(0, 1), 0);
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(65, 1), 63 + 3 + 4);
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(64, 1), 63 + 3);
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(33, 1), 32 + 3);
+  ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(32, 1), 32);
+}
+
+}  // namespace
+}  // namespace debugging_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/absl/debugging/internal/decode_rust_punycode.cc b/absl/debugging/internal/decode_rust_punycode.cc
new file mode 100644
index 0000000..43b46bf
--- /dev/null
+++ b/absl/debugging/internal/decode_rust_punycode.cc
@@ -0,0 +1,258 @@
+// Copyright 2024 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/debugging/internal/decode_rust_punycode.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+
+#include "absl/base/config.h"
+#include "absl/base/nullability.h"
+#include "absl/debugging/internal/bounded_utf8_length_sequence.h"
+#include "absl/debugging/internal/utf8_for_code_point.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+
+namespace {
+
+// Decoding Punycode requires repeated random-access insertion into a stream of
+// variable-length UTF-8 code-point encodings.  We need this to be tolerably
+// fast (no N^2 slowdown for unfortunate inputs), and we can't allocate any data
+// structures on the heap (async-signal-safety).
+//
+// It is pragmatic to impose a moderately low limit on the identifier length and
+// bail out if we ever hit it.  Then BoundedUtf8LengthSequence efficiently
+// determines where to insert the next code point, and memmove efficiently makes
+// room for it.
+//
+// The chosen limit is a round number several times larger than identifiers
+// expected in practice, yet still small enough that a memmove of this many
+// UTF-8 characters is not much more expensive than the division and modulus
+// operations that Punycode decoding requires.
+constexpr uint32_t kMaxChars = 256;
+
+// Constants from RFC 3492 section 5.
+constexpr uint32_t kBase = 36, kTMin = 1, kTMax = 26, kSkew = 38, kDamp = 700;
+
+constexpr uint32_t kMaxCodePoint = 0x10ffff;
+
+// Overflow threshold in DecodeRustPunycode's inner loop; see comments there.
+constexpr uint32_t kMaxI = 1 << 30;
+
+// If punycode_begin .. punycode_end begins with a prefix matching the regular
+// expression [0-9a-zA-Z_]+_, removes that prefix, copies all but the final
+// underscore into out_begin .. out_end, sets num_ascii_chars to the number of
+// bytes copied, and returns true.  (A prefix of this sort represents the
+// nonempty subsequence of ASCII characters in the corresponding plaintext.)
+//
+// If punycode_begin .. punycode_end does not contain an underscore, sets
+// num_ascii_chars to zero and returns true.  (The encoding of a plaintext
+// without any ASCII characters does not carry such a prefix.)
+//
+// Returns false and zeroes num_ascii_chars on failure (either parse error or
+// not enough space in the output buffer).
+bool ConsumeOptionalAsciiPrefix(const char*& punycode_begin,
+                                const char* const punycode_end,
+                                char* const out_begin,
+                                char* const out_end,
+                                uint32_t& num_ascii_chars) {
+  num_ascii_chars = 0;
+
+  // Remember the last underscore if any.  Also use the same string scan to
+  // reject any ASCII bytes that do not belong in an identifier, including NUL,
+  // as well as non-ASCII bytes, which should have been delta-encoded instead.
+  int last_underscore = -1;
+  for (int i = 0; i < punycode_end - punycode_begin; ++i) {
+    const char c = punycode_begin[i];
+    if (c == '_') {
+      last_underscore = i;
+      continue;
+    }
+    // We write out the meaning of absl::ascii_isalnum rather than call that
+    // function because its documentation does not promise it will remain
+    // async-signal-safe under future development.
+    if ('a' <= c && c <= 'z') continue;
+    if ('A' <= c && c <= 'Z') continue;
+    if ('0' <= c && c <= '9') continue;
+    return false;
+  }
+
+  // If there was no underscore, that means there were no ASCII characters in
+  // the plaintext, so there is no prefix to consume.  Our work is done.
+  if (last_underscore < 0) return true;
+
+  // Otherwise there will be an underscore delimiter somewhere.  It can't be
+  // initial because then there would be no ASCII characters to its left, and no
+  // delimiter would have been added in that case.
+  if (last_underscore == 0) return false;
+
+  // Any other position is reasonable.  Make sure there's room in the buffer.
+  if (last_underscore + 1 > out_end - out_begin) return false;
+
+  // Consume and write out the ASCII characters.
+  num_ascii_chars = static_cast<uint32_t>(last_underscore);
+  std::memcpy(out_begin, punycode_begin, num_ascii_chars);
+  out_begin[num_ascii_chars] = '\0';
+  punycode_begin += num_ascii_chars + 1;
+  return true;
+}
+
+// Returns the value of `c` as a base-36 digit according to RFC 3492 section 5,
+// or -1 if `c` is not such a digit.
+int DigitValue(char c) {
+  if ('0' <= c && c <= '9') return c - '0' + 26;
+  if ('a' <= c && c <= 'z') return c - 'a';
+  if ('A' <= c && c <= 'Z') return c - 'A';
+  return -1;
+}
+
+// Consumes the next delta encoding from punycode_begin .. punycode_end,
+// updating i accordingly.  Returns true on success.  Returns false on parse
+// failure or arithmetic overflow.
+bool ScanNextDelta(const char*& punycode_begin, const char* const punycode_end,
+                   uint32_t bias, uint32_t& i) {
+  uint64_t w = 1;  // 64 bits to prevent overflow in w *= kBase - t
+
+  // "for k = base to infinity in steps of base do begin ... end" in RFC 3492
+  // section 6.2.  Each loop iteration scans one digit of the delta.
+  for (uint32_t k = kBase; punycode_begin != punycode_end; k += kBase) {
+    const int digit_value = DigitValue(*punycode_begin++);
+    if (digit_value < 0) return false;
+
+    // Compute this in 64-bit arithmetic so we can check for overflow afterward.
+    const uint64_t new_i = i + static_cast<uint64_t>(digit_value) * w;
+
+    // Valid deltas are bounded by (#chars already emitted) * kMaxCodePoint, but
+    // invalid input could encode an arbitrarily large delta.  Nip that in the
+    // bud here.
+    static_assert(
+        kMaxI >= kMaxChars * kMaxCodePoint,
+        "kMaxI is too small to prevent spurious failures on good input");
+    if (new_i > kMaxI) return false;
+
+    static_assert(
+        kMaxI < (uint64_t{1} << 32),
+        "Make kMaxI smaller or i 64 bits wide to prevent silent wraparound");
+    i = static_cast<uint32_t>(new_i);
+
+    // Compute the threshold that determines whether this is the last digit and
+    // (if not) what the next digit's place value will be.  This logic from RFC
+    // 3492 section 6.2 is explained in section 3.3.
+    uint32_t t;
+    if (k <= bias + kTMin) {
+      t = kTMin;
+    } else if (k >= bias + kTMax) {
+      t = kTMax;
+    } else {
+      t = k - bias;
+    }
+    if (static_cast<uint32_t>(digit_value) < t) return true;
+
+    // If this gets too large, the range check on new_i in the next iteration
+    // will catch it.  We know this multiplication cannot wrap around, because
+    // w is 64 bits wide.
+    w *= kBase - t;
+  }
+  return false;
+}
+
+}  // namespace
+
+absl::Nullable<char*> DecodeRustPunycode(DecodeRustPunycodeOptions options) {
+  const char* punycode_begin = options.punycode_begin;
+  const char* const punycode_end = options.punycode_end;
+  char* const out_begin = options.out_begin;
+  char* const out_end = options.out_end;
+
+  // Write a NUL terminator first.  Later memcpy calls will keep bumping it
+  // along to its new right place.
+  const size_t out_size = static_cast<size_t>(out_end - out_begin);
+  if (out_size == 0) return nullptr;
+  *out_begin = '\0';
+
+  // RFC 3492 section 6.2 begins here.  We retain the names of integer variables
+  // appearing in that text.
+  uint32_t n = 128, i = 0, bias = 72, num_chars = 0;
+
+  // If there are any ASCII characters, consume them and their trailing
+  // underscore delimiter.
+  if (!ConsumeOptionalAsciiPrefix(punycode_begin, punycode_end,
+                                  out_begin, out_end, num_chars)) {
+    return nullptr;
+  }
+  uint32_t total_utf8_bytes = num_chars;
+
+  BoundedUtf8LengthSequence<kMaxChars> utf8_lengths;
+
+  // "while the input is not exhausted do begin ... end"
+  while (punycode_begin != punycode_end) {
+    if (num_chars >= kMaxChars) return nullptr;
+
+    const uint32_t old_i = i;
+
+    if (!ScanNextDelta(punycode_begin, punycode_end, bias, i)) return nullptr;
+
+    // Update bias as in RFC 3492 section 6.1.  (We have inlined adapt.)
+    uint32_t delta = i - old_i;
+    delta /= (old_i == 0 ? kDamp : 2);
+    delta += delta/(num_chars + 1);
+    bias = 0;
+    while (delta > ((kBase - kTMin) * kTMax)/2) {
+      delta /= kBase - kTMin;
+      bias += kBase;
+    }
+    bias += ((kBase - kTMin + 1) * delta)/(delta + kSkew);
+
+    // Back in section 6.2, compute the new code point and insertion index.
+    static_assert(
+        kMaxI + kMaxCodePoint < (uint64_t{1} << 32),
+        "Make kMaxI smaller or n 64 bits wide to prevent silent wraparound");
+    n += i/(num_chars + 1);
+    i %= num_chars + 1;
+
+    // To actually insert, we need to convert the code point n to UTF-8 and the
+    // character index i to an index into the byte stream emitted so far.  First
+    // prepare the UTF-8 encoding for n, rejecting surrogates, overlarge values,
+    // and anything that won't fit into the remaining output storage.
+    Utf8ForCodePoint utf8_for_code_point(n);
+    if (!utf8_for_code_point.ok()) return nullptr;
+    if (total_utf8_bytes + utf8_for_code_point.length + 1 > out_size) {
+      return nullptr;
+    }
+
+    // Now insert the new character into both our length map and the output.
+    uint32_t n_index =
+        utf8_lengths.InsertAndReturnSumOfPredecessors(
+            i, utf8_for_code_point.length);
+    std::memmove(
+        out_begin + n_index + utf8_for_code_point.length, out_begin + n_index,
+        total_utf8_bytes + 1 - n_index);
+    std::memcpy(out_begin + n_index, utf8_for_code_point.bytes,
+                utf8_for_code_point.length);
+    total_utf8_bytes += utf8_for_code_point.length;
+    ++num_chars;
+
+    // Finally, advance to the next state before continuing.
+    ++i;
+  }
+
+  return out_begin + total_utf8_bytes;
+}
+
+}  // namespace debugging_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/absl/debugging/internal/decode_rust_punycode.h b/absl/debugging/internal/decode_rust_punycode.h
new file mode 100644
index 0000000..0ae53ff
--- /dev/null
+++ b/absl/debugging/internal/decode_rust_punycode.h
@@ -0,0 +1,55 @@
+// Copyright 2024 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_DEBUGGING_INTERNAL_DECODE_RUST_PUNYCODE_H_
+#define ABSL_DEBUGGING_INTERNAL_DECODE_RUST_PUNYCODE_H_
+
+#include "absl/base/config.h"
+#include "absl/base/nullability.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+
+struct DecodeRustPunycodeOptions {
+  const char* punycode_begin;
+  const char* punycode_end;
+  char* out_begin;
+  char* out_end;
+};
+
+// Given Rust Punycode in `punycode_begin .. punycode_end`, writes the
+// corresponding UTF-8 plaintext into `out_begin .. out_end`, followed by a NUL
+// character, and returns a pointer to that final NUL on success.  On failure
+// returns a null pointer, and the contents of `out_begin .. out_end` are
+// unspecified.
+//
+// Failure occurs in precisely these cases:
+//   - Any input byte does not match [0-9a-zA-Z_].
+//   - The first input byte is an underscore, but no other underscore appears in
+//     the input.
+//   - The delta sequence does not represent a valid sequence of code-point
+//     insertions.
+//   - The plaintext would contain more than 256 code points.
+//
+// DecodeRustPunycode is async-signal-safe with bounded runtime and a small
+// stack footprint, making it suitable for use in demangling Rust symbol names
+// from a signal handler.
+absl::Nullable<char*> DecodeRustPunycode(DecodeRustPunycodeOptions options);
+
+}  // namespace debugging_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_DEBUGGING_INTERNAL_DECODE_RUST_PUNYCODE_H_
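A minimal usage sketch (not part of this patch) of the interface above, decoding the pure-ASCII vector "foo_bar_" that the unit tests below also exercise; `DecodeRustPunycodeExample` is an illustrative name.

#include <cstring>

#include "absl/debugging/internal/decode_rust_punycode.h"

// Returns true if decoding succeeds and yields "foo_bar".  The final
// underscore in the input is the delimiter after the ASCII run; an empty
// delta sequence follows, so no non-ASCII code points are inserted.
inline bool DecodeRustPunycodeExample() {
  const char punycode[] = "foo_bar_";
  char out[64];

  absl::debugging_internal::DecodeRustPunycodeOptions options;
  options.punycode_begin = punycode;
  options.punycode_end = punycode + sizeof(punycode) - 1;  // exclude the NUL
  options.out_begin = out;
  options.out_end = out + sizeof(out);

  // On success the return value points at the NUL terminator written after
  // the plaintext; on failure it is null.
  char* nul = absl::debugging_internal::DecodeRustPunycode(options);
  return nul != nullptr && std::strcmp(out, "foo_bar") == 0;
}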
diff --git a/absl/debugging/internal/decode_rust_punycode_test.cc b/absl/debugging/internal/decode_rust_punycode_test.cc
new file mode 100644
index 0000000..78d1c33
--- /dev/null
+++ b/absl/debugging/internal/decode_rust_punycode_test.cc
@@ -0,0 +1,606 @@
+// Copyright 2024 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/debugging/internal/decode_rust_punycode.h"
+
+#include <cstddef>
+#include <cstring>
+#include <string>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+namespace {
+
+using ::testing::AllOf;
+using ::testing::Eq;
+using ::testing::IsNull;
+using ::testing::Pointee;
+using ::testing::ResultOf;
+using ::testing::StrEq;
+
+class DecodeRustPunycodeTest : public ::testing::Test {
+ protected:
+  void FillBufferWithNonzeroBytes() {
+    // The choice of nonzero value to fill with is arbitrary.  The point is just
+    // to fail tests if DecodeRustPunycode forgets to write the final NUL
+    // character.
+    std::memset(buffer_storage_, 0xab, sizeof(buffer_storage_));
+  }
+
+  DecodeRustPunycodeOptions WithAmpleSpace() {
+    FillBufferWithNonzeroBytes();
+
+    DecodeRustPunycodeOptions options;
+    options.punycode_begin = punycode_.data();
+    options.punycode_end = punycode_.data() + punycode_.size();
+    options.out_begin = buffer_storage_;
+    options.out_end = buffer_storage_ + sizeof(buffer_storage_);
+    return options;
+  }
+
+  DecodeRustPunycodeOptions WithJustEnoughSpace() {
+    FillBufferWithNonzeroBytes();
+
+    const size_t begin_offset = sizeof(buffer_storage_) - plaintext_.size() - 1;
+    DecodeRustPunycodeOptions options;
+    options.punycode_begin = punycode_.data();
+    options.punycode_end = punycode_.data() + punycode_.size();
+    options.out_begin = buffer_storage_ + begin_offset;
+    options.out_end = buffer_storage_ + sizeof(buffer_storage_);
+    return options;
+  }
+
+  DecodeRustPunycodeOptions WithOneByteTooFew() {
+    FillBufferWithNonzeroBytes();
+
+    const size_t begin_offset = sizeof(buffer_storage_) - plaintext_.size();
+    DecodeRustPunycodeOptions options;
+    options.punycode_begin = punycode_.data();
+    options.punycode_end = punycode_.data() + punycode_.size();
+    options.out_begin = buffer_storage_ + begin_offset;
+    options.out_end = buffer_storage_ + sizeof(buffer_storage_);
+    return options;
+  }
+
+  // Matches a correct return value of DecodeRustPunycode when `golden` is the
+  // expected plaintext output.
+  auto PointsToTheNulAfter(const std::string& golden) {
+    const size_t golden_size = golden.size();
+    return AllOf(
+        Pointee(Eq('\0')),
+        ResultOf("preceding string body",
+                 [golden_size](const char* p) { return p - golden_size; },
+                 StrEq(golden)));
+  }
+
+  std::string punycode_;
+  std::string plaintext_;
+  char buffer_storage_[1024];
+};
+
+TEST_F(DecodeRustPunycodeTest, MapsEmptyToEmpty) {
+  punycode_ = "";
+  plaintext_ = "";
+
+  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
+              PointsToTheNulAfter(plaintext_));
+  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
+}
+
+TEST_F(DecodeRustPunycodeTest,
+       StripsTheTrailingDelimiterFromAPureRunOfBasicChars) {
+  punycode_ = "foo_";
+  plaintext_ = "foo";
+
+  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
+              PointsToTheNulAfter(plaintext_));
+  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
+}
+
+TEST_F(DecodeRustPunycodeTest, TreatsTheLastUnderscoreAsTheDelimiter) {
+  punycode_ = "foo_bar_";
+  plaintext_ = "foo_bar";
+
+  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
+              PointsToTheNulAfter(plaintext_));
+  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
+}
+
+TEST_F(DecodeRustPunycodeTest, AcceptsALeadingUnderscoreIfNotTheDelimiter) {
+  punycode_ = "_foo_";
+  plaintext_ = "_foo";
+
+  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
+              PointsToTheNulAfter(plaintext_));
+  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
+}
+
+TEST_F(DecodeRustPunycodeTest, RejectsALeadingUnderscoreDelimiter) {
+  punycode_ = "_foo";
+
+  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
+}
+
+TEST_F(DecodeRustPunycodeTest, RejectsEmbeddedNul) {
+  punycode_ = std::string("foo\0bar_", 8);
+
+  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
+}
+
+TEST_F(DecodeRustPunycodeTest, RejectsAsciiCharsOtherThanIdentifierChars) {
+  punycode_ = "foo\007_";
+  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
+
+  punycode_ = "foo-_";
+  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
+
+  punycode_ = "foo;_";
+  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
+
+  punycode_ = "foo\177_";
+  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
+}
+
+TEST_F(DecodeRustPunycodeTest, RejectsRawNonAsciiChars) {
+  punycode_ = "\x80";
+  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
+
+  punycode_ = "\x80_";
+  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
+
+  punycode_ = "\xff";
+  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
+
+  punycode_ = "\xff_";
+  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
+}
+
+TEST_F(DecodeRustPunycodeTest, RecognizesU0080) {
+  // a encodes 0, so the output is the smallest non-ASCII code point standing
+  // alone.  (U+0080 PAD is not an identifier character, but DecodeRustPunycode
+  // does not check whether non-ASCII characters could belong to an identifier.)
+  punycode_ = "a";
+  plaintext_ = "\xc2\x80";
+
+  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
+              PointsToTheNulAfter(plaintext_));
+  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
+}
+
+TEST_F(DecodeRustPunycodeTest, OneByteDeltaSequencesMustBeA) {
+  // Because bias = 72 for the first code point, any digit but a/A is nonfinal
+  // in one of the first two bytes of a delta sequence.
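+  // (Per RFC 3492, the threshold for each of those positions is t = k - bias,
+  // clamped to [tmin, tmax] = [1, 26]; with k = 36 or 72 and bias = 72 this
+  // clamps to 1, so only the digit value 0, i.e. a/A, satisfies digit < t.)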
+  punycode_ = "b";
+  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
+
+  punycode_ = "z";
+  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
+
+  punycode_ = "0";
+  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
+
+  punycode_ = "9";
+  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
+}
+
+TEST_F(DecodeRustPunycodeTest, AcceptsDeltaSequenceBA) {
+  punycode_ = "ba";
+  plaintext_ = "\xc2\x81";
+
+  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
+              PointsToTheNulAfter(plaintext_));
+  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
+}
+
+TEST_F(DecodeRustPunycodeTest, AcceptsOtherDeltaSequencesWithSecondByteA) {
+  punycode_ = "ca";
+  plaintext_ = "\xc2\x82";
+  EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+
+  punycode_ = "za";
+  plaintext_ = "\xc2\x99";
+  EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+
+  punycode_ = "0a";
+  plaintext_ = "\xc2\x9a";
+  EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+
+  punycode_ = "1a";
+  plaintext_ = "\xc2\x9b";
+  EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+
+  punycode_ = "9a";
+  plaintext_ = "£";  // Pound sign, U+00A3
+  EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+}
+
+TEST_F(DecodeRustPunycodeTest, RejectsDeltaWhereTheSecondAndLastDigitIsNotA) {
+  punycode_ = "bb";
+  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
+
+  punycode_ = "zz";
+  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
+
+  punycode_ = "00";
+  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
+
+  punycode_ = "99";
+  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
+}
+
+TEST_F(DecodeRustPunycodeTest, AcceptsDeltasWithSecondByteBFollowedByA) {
+  punycode_ = "bba";
+  plaintext_ = "¤";  // U+00A4
+  EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+
+  punycode_ = "cba";
+  plaintext_ = "¥";  // U+00A5
+  EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+
+  punycode_ = "zba";
+  plaintext_ = "¼";  // U+00BC
+  EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+
+  punycode_ = "0ba";
+  plaintext_ = "½";  // U+00BD
+  EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+
+  punycode_ = "1ba";
+  plaintext_ = "¾";  // U+00BE
+  EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+
+  punycode_ = "9ba";
+  plaintext_ = "Æ";  // U+00C6
+  EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+}
+
+// Tests beyond this point use characters allowed in identifiers, so you can
+// prepend _RNvC1cu<decimal length><underscore if [0-9_] follows> to a test
+// input and run it through another Rust demangler to verify that the
+// corresponding golden output is correct.
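+//
+// As an illustrative check (not itself a test case): the input "0ca" below is
+// 3 bytes long and starts with a digit, so the full symbol would be
+// _RNvC1cu3_0ca, and its demangling should end in the expected "à".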
+
+TEST_F(DecodeRustPunycodeTest, AcceptsTwoByteCharAlone) {
+  punycode_ = "0ca";
+  plaintext_ = "à";
+
+  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
+              PointsToTheNulAfter(plaintext_));
+  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
+}
+
+TEST_F(DecodeRustPunycodeTest, AcceptsTwoByteCharBeforeBasicChars) {
+  punycode_ = "_la_mode_yya";
+  plaintext_ = "à_la_mode";
+
+  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
+              PointsToTheNulAfter(plaintext_));
+  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
+}
+
+TEST_F(DecodeRustPunycodeTest, AcceptsTwoByteCharAmidBasicChars) {
+  punycode_ = "verre__vin_m4a";
+  plaintext_ = "verre_à_vin";
+
+  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
+              PointsToTheNulAfter(plaintext_));
+  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
+}
+
+TEST_F(DecodeRustPunycodeTest, AcceptsTwoByteCharAfterBasicChars) {
+  punycode_ = "belt_3na";
+  plaintext_ = "beltà";
+
+  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
+              PointsToTheNulAfter(plaintext_));
+  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
+}
+
+TEST_F(DecodeRustPunycodeTest, AcceptsRepeatedTwoByteChar) {
+  punycode_ = "0caaaa";
+  plaintext_ = "àààà";
+
+  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
+              PointsToTheNulAfter(plaintext_));
+  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
+}
+
+TEST_F(DecodeRustPunycodeTest, AcceptsNearbyTwoByteCharsInOrder) {
+  punycode_ = "3camsuz";
+  plaintext_ = "ãéïôù";
+
+  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
+              PointsToTheNulAfter(plaintext_));
+  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
+}
+
+TEST_F(DecodeRustPunycodeTest, AcceptsNearbyTwoByteCharsOutOfOrder) {
+  punycode_ = "3caltsx";
+  plaintext_ = "ùéôãï";
+
+  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
+              PointsToTheNulAfter(plaintext_));
+  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
+}
+
+TEST_F(DecodeRustPunycodeTest, AcceptsThreeByteCharAlone) {
+  punycode_ = "fiq";
+  plaintext_ = "中";
+
+  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
+              PointsToTheNulAfter(plaintext_));
+  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
+}
+
+TEST_F(DecodeRustPunycodeTest, AcceptsRepeatedThreeByteChar) {
+  punycode_ = "fiqaaaa";
+  plaintext_ = "中中中中中";
+
+  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
+              PointsToTheNulAfter(plaintext_));
+  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
+}
+
+TEST_F(DecodeRustPunycodeTest, AcceptsThreeByteCharsInOrder) {
+  punycode_ = "fiq228c";
+  plaintext_ = "中文";
+
+  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
+              PointsToTheNulAfter(plaintext_));
+  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
+}
+
+TEST_F(DecodeRustPunycodeTest, AcceptsNearbyThreeByteCharsOutOfOrder) {
+  punycode_ = "fiq128c";
+  plaintext_ = "文中";
+
+  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
+              PointsToTheNulAfter(plaintext_));
+  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
+}
+
+TEST_F(DecodeRustPunycodeTest, AcceptsFourByteCharAlone) {
+  punycode_ = "uy7h";
+  plaintext_ = "🂻";
+
+  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
+              PointsToTheNulAfter(plaintext_));
+  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
+}
+
+TEST_F(DecodeRustPunycodeTest, AcceptsFourByteCharBeforeBasicChars) {
+  punycode_ = "jack__uh63d";
+  plaintext_ = "jack_🂻";
+
+  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
+              PointsToTheNulAfter(plaintext_));
+  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
+}
+
+TEST_F(DecodeRustPunycodeTest, AcceptsFourByteCharAmidBasicChars) {
+  punycode_ = "jack__of_hearts_ki37n";
+  plaintext_ = "jack_🂻_of_hearts";
+
+  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
+              PointsToTheNulAfter(plaintext_));
+  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
+}
+
+TEST_F(DecodeRustPunycodeTest, AcceptsFourByteCharAfterBasicChars) {
+  punycode_ = "_of_hearts_kz45i";
+  plaintext_ = "🂻_of_hearts";
+
+  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
+              PointsToTheNulAfter(plaintext_));
+  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
+}
+
+TEST_F(DecodeRustPunycodeTest, AcceptsRepeatedFourByteChar) {
+  punycode_ = "uy7haaaa";
+  plaintext_ = "🂻🂻🂻🂻🂻";
+
+  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
+              PointsToTheNulAfter(plaintext_));
+  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
+}
+
+TEST_F(DecodeRustPunycodeTest, AcceptsNearbyFourByteCharsInOrder) {
+  punycode_ = "8x7hcjmf";
+  plaintext_ = "🂦🂧🂪🂭🂮";
+
+  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
+              PointsToTheNulAfter(plaintext_));
+  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
+}
+
+TEST_F(DecodeRustPunycodeTest, AcceptsNearbyFourByteCharsOutOfOrder) {
+  punycode_ = "8x7hcild";
+  plaintext_ = "🂮🂦🂭🂪🂧";
+
+  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
+              PointsToTheNulAfter(plaintext_));
+  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
+}
+
+TEST_F(DecodeRustPunycodeTest, AcceptsAMixtureOfByteLengths) {
+  punycode_ = "3caltsx2079ivf8aiuy7cja3a6ak";
+  plaintext_ = "ùéôãï中文🂮🂦🂭🂪🂧";
+
+  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
+              PointsToTheNulAfter(plaintext_));
+  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
+}
+
+TEST_F(DecodeRustPunycodeTest, RejectsOverlargeDeltas) {
+  punycode_ = "123456789a";
+
+  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
+}
+
+// Finally, we test on a few prose and poetry snippets as a defense in depth.
+// If our artificial short test inputs did not exercise a bug that is tickled
+// by patterns typical of real human writing, perhaps these longer inputs will
+// catch it.
+//
+// These test inputs are extracted from texts old enough to be out of copyright
+// that probe a variety of ranges of code-point space.  All are longer than 32
+// code points, so they exercise the carrying of seminibbles from one uint64_t
+// to the next higher one in BoundedUtf8LengthSequence.
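+// (A seminibble is two bits, so each uint64_t records the UTF-8 lengths of 32
+// code points; the 33rd length spills into the next word.)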
+
+// The first three lines of the Old English epic _Beowulf_, mostly ASCII with a
+// few archaic two-byte letters interspersed.
+TEST_F(DecodeRustPunycodeTest, Beowulf) {
+  punycode_ = "hwt_we_gardena_in_geardagum_"
+              "eodcyninga_rym_gefrunon_"
+              "hu_a_elingas_ellen_fremedon_hxg9c70do9alau";
+  plaintext_ = "hwæt_we_gardena_in_geardagum_"
+               "þeodcyninga_þrym_gefrunon_"
+               "hu_ða_æþelingas_ellen_fremedon";
+
+  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
+              PointsToTheNulAfter(plaintext_));
+  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
+}
+
+// The whole of 過故人莊 by the 8th-century Chinese poet 孟浩然
+// (Meng Haoran), exercising three-byte-character processing.
+TEST_F(DecodeRustPunycodeTest, MengHaoran) {
+  punycode_ = "gmq4ss0cfvao1e2wg8mcw8b0wkl9a7tt90a8riuvbk7t8kbv9a66ogofvzlf6"
+              "3d01ybn1u28dyqi5q2cxyyxnk5d2gx1ks9ddvfm17bk6gbsd6wftrav60u4ta";
+  plaintext_ = "故人具雞黍" "邀我至田家"
+               "綠樹村邊合" "青山郭外斜"
+               "開軒面場圃" "把酒話桑麻"
+               "待到重陽日" "還來就菊花";
+
+  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
+              PointsToTheNulAfter(plaintext_));
+  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
+}
+
+// A poem of the 8th-century Japanese poet 山上憶良 (Yamanoue no Okura).
+// Japanese mixes two-byte and three-byte characters: a good workout for codecs.
+TEST_F(DecodeRustPunycodeTest, YamanoueNoOkura) {
+  punycode_ = "48jdaa3a6ccpepjrsmlb0q4bwcdtid8fg6c0cai9822utqeruk3om0u4f2wbp0"
+              "em23do0op23cc2ff70mb6tae8aq759gja";
+  plaintext_ = "瓜食めば"
+               "子ども思ほゆ"
+               "栗食めば"
+               "まして偲はゆ"
+               "何処より"
+               "来りしものそ"
+               "眼交に"
+               "もとな懸りて"
+               "安眠し寝さぬ";
+
+  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
+              PointsToTheNulAfter(plaintext_));
+  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
+}
+
+// The first two lines of the Phoenician-language inscription on the sarcophagus
+// of Eshmunazar II of Sidon, 6th century BCE.  Phoenician and many other
+// archaic scripts are allocated in the Supplementary Multilingual Plane (U+10000
+// through U+1FFFF) and thus exercise four-byte-character processing.
+TEST_F(DecodeRustPunycodeTest, EshmunazarSarcophagus) {
+  punycode_ = "wj9caaabaabbaaohcacxvhdc7bgxbccbdcjeacddcedcdlddbdbddcdbdcknfcee"
+              "ifel8del2a7inq9fhcpxikms7a4a9ac9ataaa0g";
+  plaintext_ = "𐤁𐤉𐤓𐤇𐤁𐤋𐤁𐤔𐤍𐤕𐤏𐤎𐤓"
+               "𐤅𐤀𐤓𐤁𐤏𐤗𐤖𐤖𐤖𐤖𐤋𐤌𐤋𐤊𐤉𐤌𐤋𐤊"
+               "𐤀𐤔𐤌𐤍𐤏𐤆𐤓𐤌𐤋𐤊𐤑𐤃𐤍𐤌"
+               "𐤁𐤍𐤌𐤋𐤊𐤕𐤁𐤍𐤕𐤌𐤋𐤊𐤑𐤃𐤍𐤌"
+               "𐤃𐤁𐤓𐤌𐤋𐤊𐤀𐤔𐤌𐤍𐤏𐤆𐤓𐤌𐤋𐤊"
+               "𐤑𐤃𐤍𐤌𐤋𐤀𐤌𐤓𐤍𐤂𐤆𐤋𐤕";
+
+  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
+              PointsToTheNulAfter(plaintext_));
+  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
+              PointsToTheNulAfter(plaintext_));
+  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
+}
+
+}  // namespace
+}  // namespace debugging_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/absl/debugging/internal/demangle.cc b/absl/debugging/internal/demangle.cc
index 381a2b5..caac763 100644
--- a/absl/debugging/internal/demangle.cc
+++ b/absl/debugging/internal/demangle.cc
@@ -14,18 +14,19 @@
 
 // For reference check out:
 // https://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling
-//
-// Note that we only have partial C++11 support yet.
 
 #include "absl/debugging/internal/demangle.h"
 
+#include <cstddef>
 #include <cstdint>
 #include <cstdio>
 #include <cstdlib>
+#include <cstring>
 #include <limits>
 #include <string>
 
 #include "absl/base/config.h"
+#include "absl/debugging/internal/demangle_rust.h"
 
 #if ABSL_INTERNAL_HAS_CXA_DEMANGLE
 #include <cxxabi.h>
@@ -44,14 +45,16 @@
 
 // List of operators from Itanium C++ ABI.
 static const AbbrevPair kOperatorList[] = {
-    // New has special syntax (not currently supported).
+    // New has special syntax.
     {"nw", "new", 0},
     {"na", "new[]", 0},
 
-    // Works except that the 'gs' prefix is not supported.
+    // Special-cased elsewhere to support the optional gs prefix.
     {"dl", "delete", 1},
     {"da", "delete[]", 1},
 
+    {"aw", "co_await", 1},
+
     {"ps", "+", 1},  // "positive"
     {"ng", "-", 1},  // "negative"
     {"ad", "&", 1},  // "address-of"
@@ -79,6 +82,7 @@
     {"rs", ">>", 2},
     {"lS", "<<=", 2},
     {"rS", ">>=", 2},
+    {"ss", "<=>", 2},
     {"eq", "==", 2},
     {"ne", "!=", 2},
     {"lt", "<", 2},
@@ -98,6 +102,7 @@
     {"qu", "?", 3},
     {"st", "sizeof", 0},  // Special syntax
     {"sz", "sizeof", 1},  // Not a real operator name, but used in expressions.
+    {"sZ", "sizeof...", 0},  // Special syntax
     {nullptr, nullptr, 0},
 };
 
@@ -187,9 +192,50 @@
   int recursion_depth;        // For stack exhaustion prevention.
   int steps;               // Cap how much work we'll do, regardless of depth.
   ParseState parse_state;  // Backtrackable state copied for most frames.
+
+  // Conditionally compiled support for marking the position of the first
+  // construct Demangle couldn't parse.  This preprocessor symbol is intended
+  // for use by Abseil demangler maintainers only; its behavior is not part of
+  // Abseil's public interface.
+#ifdef ABSL_INTERNAL_DEMANGLE_RECORDS_HIGH_WATER_MARK
+  int high_water_mark;  // Input position where parsing failed.
+  bool too_complex;  // True if any guard.IsTooComplex() call returned true.
+#endif
 } State;
 
 namespace {
+
+#ifdef ABSL_INTERNAL_DEMANGLE_RECORDS_HIGH_WATER_MARK
+void UpdateHighWaterMark(State *state) {
+  if (state->high_water_mark < state->parse_state.mangled_idx) {
+    state->high_water_mark = state->parse_state.mangled_idx;
+  }
+}
+
+void ReportHighWaterMark(State *state) {
+  // Write out the mangled name with the trouble point marked, provided that the
+  // output buffer is large enough and the mangled name did not hit a complexity
+  // limit (in which case the high water mark wouldn't point out an unparsable
+  // construct, only the point where a budget ran out).
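+  // For example (illustrative values): with mangled input "_Zxy" and a high
+  // water mark of 2, the marked output written below is "_Z--!--xy".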
+  const size_t input_length = std::strlen(state->mangled_begin);
+  if (input_length + 6 > static_cast<size_t>(state->out_end_idx) ||
+      state->too_complex) {
+    if (state->out_end_idx > 0) state->out[0] = '\0';
+    return;
+  }
+  const size_t high_water_mark = static_cast<size_t>(state->high_water_mark);
+  std::memcpy(state->out, state->mangled_begin, high_water_mark);
+  std::memcpy(state->out + high_water_mark, "--!--", 5);
+  std::memcpy(state->out + high_water_mark + 5,
+              state->mangled_begin + high_water_mark,
+              input_length - high_water_mark);
+  state->out[input_length + 5] = '\0';
+}
+#else
+void UpdateHighWaterMark(State *) {}
+void ReportHighWaterMark(State *) {}
+#endif
+
 // Prevent deep recursion / stack exhaustion.
 // Also prevent unbounded handling of complex inputs.
 class ComplexityGuard {
@@ -201,7 +247,7 @@
   ~ComplexityGuard() { --state_->recursion_depth; }
 
   // 256 levels of recursion seems like a reasonable upper limit on depth.
-  // 128 is not enough to demagle synthetic tests from demangle_unittest.txt:
+  // 128 is not enough to demangle synthetic tests from demangle_unittest.txt:
   // "_ZaaZZZZ..." and "_ZaaZcvZcvZ..."
   static constexpr int kRecursionDepthLimit = 256;
 
@@ -222,8 +268,14 @@
   static constexpr int kParseStepsLimit = 1 << 17;
 
   bool IsTooComplex() const {
-    return state_->recursion_depth > kRecursionDepthLimit ||
-           state_->steps > kParseStepsLimit;
+    if (state_->recursion_depth > kRecursionDepthLimit ||
+        state_->steps > kParseStepsLimit) {
+#ifdef ABSL_INTERNAL_DEMANGLE_RECORDS_HIGH_WATER_MARK
+      state_->too_complex = true;
+#endif
+      return true;
+    }
+    return false;
   }
 
  private:
@@ -270,6 +322,10 @@
   state->out_end_idx = static_cast<int>(out_size);
   state->recursion_depth = 0;
   state->steps = 0;
+#ifdef ABSL_INTERNAL_DEMANGLE_RECORDS_HIGH_WATER_MARK
+  state->high_water_mark = 0;
+  state->too_complex = false;
+#endif
 
   state->parse_state.mangled_idx = 0;
   state->parse_state.out_cur_idx = 0;
@@ -291,13 +347,14 @@
   if (guard.IsTooComplex()) return false;
   if (RemainingInput(state)[0] == one_char_token) {
     ++state->parse_state.mangled_idx;
+    UpdateHighWaterMark(state);
     return true;
   }
   return false;
 }
 
-// Returns true and advances "mangled_cur" if we find "two_char_token"
-// at "mangled_cur" position.  It is assumed that "two_char_token" does
+// Returns true and advances "mangled_idx" if we find "two_char_token"
+// at "mangled_idx" position.  It is assumed that "two_char_token" does
 // not contain '\0'.
 static bool ParseTwoCharToken(State *state, const char *two_char_token) {
   ComplexityGuard guard(state);
@@ -305,11 +362,45 @@
   if (RemainingInput(state)[0] == two_char_token[0] &&
       RemainingInput(state)[1] == two_char_token[1]) {
     state->parse_state.mangled_idx += 2;
+    UpdateHighWaterMark(state);
     return true;
   }
   return false;
 }
 
+// Returns true and advances "mangled_idx" if we find "three_char_token"
+// at "mangled_idx" position.  It is assumed that "three_char_token" does
+// not contain '\0'.
+static bool ParseThreeCharToken(State *state, const char *three_char_token) {
+  ComplexityGuard guard(state);
+  if (guard.IsTooComplex()) return false;
+  if (RemainingInput(state)[0] == three_char_token[0] &&
+      RemainingInput(state)[1] == three_char_token[1] &&
+      RemainingInput(state)[2] == three_char_token[2]) {
+    state->parse_state.mangled_idx += 3;
+    UpdateHighWaterMark(state);
+    return true;
+  }
+  return false;
+}
+
+// Returns true and advances "mangled_idx" if we find a copy of the
+// NUL-terminated string "long_token" at "mangled_idx" position.
+static bool ParseLongToken(State *state, const char *long_token) {
+  ComplexityGuard guard(state);
+  if (guard.IsTooComplex()) return false;
+  int i = 0;
+  for (; long_token[i] != '\0'; ++i) {
+    // Note that we cannot run off the end of the NUL-terminated input here.
+    // Inside the loop body, long_token[i] is known to be different from NUL.
+    // So if we read the NUL at the end of the input here, we return at once.
+    if (RemainingInput(state)[i] != long_token[i]) return false;
+  }
+  state->parse_state.mangled_idx += i;
+  UpdateHighWaterMark(state);
+  return true;
+}
+
 // Returns true and advances "mangled_cur" if we find any character in
 // "char_class" at "mangled_cur" position.
 static bool ParseCharClass(State *state, const char *char_class) {
@@ -322,6 +413,7 @@
   for (; *p != '\0'; ++p) {
     if (RemainingInput(state)[0] == *p) {
       ++state->parse_state.mangled_idx;
+      UpdateHighWaterMark(state);
       return true;
     }
   }
@@ -554,6 +646,7 @@
 static bool ParseSeqId(State *state);
 static bool ParseIdentifier(State *state, size_t length);
 static bool ParseOperatorName(State *state, int *arity);
+static bool ParseConversionOperatorType(State *state);
 static bool ParseSpecialName(State *state);
 static bool ParseCallOffset(State *state);
 static bool ParseNVOffset(State *state);
@@ -563,21 +656,33 @@
 static bool ParseDecltype(State *state);
 static bool ParseType(State *state);
 static bool ParseCVQualifiers(State *state);
+static bool ParseExtendedQualifier(State *state);
 static bool ParseBuiltinType(State *state);
+static bool ParseVendorExtendedType(State *state);
 static bool ParseFunctionType(State *state);
 static bool ParseBareFunctionType(State *state);
+static bool ParseOverloadAttribute(State *state);
 static bool ParseClassEnumType(State *state);
 static bool ParseArrayType(State *state);
 static bool ParsePointerToMemberType(State *state);
 static bool ParseTemplateParam(State *state);
+static bool ParseTemplateParamDecl(State *state);
 static bool ParseTemplateTemplateParam(State *state);
 static bool ParseTemplateArgs(State *state);
 static bool ParseTemplateArg(State *state);
 static bool ParseBaseUnresolvedName(State *state);
 static bool ParseUnresolvedName(State *state);
+static bool ParseUnresolvedQualifierLevel(State *state);
+static bool ParseUnionSelector(State* state);
+static bool ParseFunctionParam(State* state);
+static bool ParseBracedExpression(State *state);
 static bool ParseExpression(State *state);
+static bool ParseInitializer(State *state);
 static bool ParseExprPrimary(State *state);
-static bool ParseExprCastValue(State *state);
+static bool ParseExprCastValueAndTrailingE(State *state);
+static bool ParseQRequiresClauseExpr(State *state);
+static bool ParseRequirement(State *state);
+static bool ParseTypeConstraint(State *state);
 static bool ParseLocalName(State *state);
 static bool ParseLocalNameSuffix(State *state);
 static bool ParseDiscriminator(State *state);
@@ -622,22 +727,34 @@
 }
 
 // <encoding> ::= <(function) name> <bare-function-type>
+//                [`Q` <requires-clause expr>]
 //            ::= <(data) name>
 //            ::= <special-name>
+//
+// NOTE: Based on http://shortn/_Hoq9qG83rx
 static bool ParseEncoding(State *state) {
   ComplexityGuard guard(state);
   if (guard.IsTooComplex()) return false;
-  // Implementing the first two productions together as <name>
-  // [<bare-function-type>] avoids exponential blowup of backtracking.
+  // Since the first two productions both start with <name>, attempt
+  // to parse it only once to avoid exponential blowup of backtracking.
   //
-  // Since Optional(...) can't fail, there's no need to copy the state for
-  // backtracking.
-  if (ParseName(state) && Optional(ParseBareFunctionType(state))) {
+  // We're careful about exponential blowup because <encoding> recursively
+  // appears in other productions downstream of its first two productions,
+  // which means that every call to `ParseName` would possibly indirectly
+  // result in two calls to `ParseName` etc.
+  if (ParseName(state)) {
+    if (!ParseBareFunctionType(state)) {
+      return true;  // <(data) name>
+    }
+
+    // Parsed: <(function) name> <bare-function-type>
+    // Pending: [`Q` <requires-clause expr>]
+    ParseQRequiresClauseExpr(state);  // restores state on failure
     return true;
   }
 
   if (ParseSpecialName(state)) {
-    return true;
+    return true;  // <special-name>
   }
   return false;
 }
@@ -723,19 +840,26 @@
 // <prefix> ::= <prefix> <unqualified-name>
 //          ::= <template-prefix> <template-args>
 //          ::= <template-param>
+//          ::= <decltype>
 //          ::= <substitution>
 //          ::= # empty
 // <template-prefix> ::= <prefix> <(template) unqualified-name>
 //                   ::= <template-param>
 //                   ::= <substitution>
+//                   ::= <vendor-extended-type>
 static bool ParsePrefix(State *state) {
   ComplexityGuard guard(state);
   if (guard.IsTooComplex()) return false;
   bool has_something = false;
   while (true) {
     MaybeAppendSeparator(state);
-    if (ParseTemplateParam(state) ||
+    if (ParseTemplateParam(state) || ParseDecltype(state) ||
         ParseSubstitution(state, /*accept_std=*/true) ||
+        // Although the official grammar does not mention it, nested-names
+        // shaped like Nu14__some_builtinIiE6memberE occur in practice, and it
+        // is not clear what else a compiler is supposed to do when a
+        // vendor-extended type has named members.
+        ParseVendorExtendedType(state) ||
         ParseUnscopedName(state) ||
         (ParseOneCharToken(state, 'M') && ParseUnnamedTypeName(state))) {
       has_something = true;
@@ -757,8 +881,14 @@
 //                    ::= <source-name> [<abi-tags>]
 //                    ::= <local-source-name> [<abi-tags>]
 //                    ::= <unnamed-type-name> [<abi-tags>]
+//                    ::= DC <source-name>+ E  # C++17 structured binding
+//                    ::= F <source-name>  # C++20 constrained friend
+//                    ::= F <operator-name>  # C++20 constrained friend
 //
 // <local-source-name> is a GCC extension; see below.
+//
+// For the F notation for constrained friends, see
+// https://github.com/itanium-cxx-abi/cxx-abi/issues/24#issuecomment-1491130332.
 static bool ParseUnqualifiedName(State *state) {
   ComplexityGuard guard(state);
   if (guard.IsTooComplex()) return false;
@@ -767,6 +897,23 @@
       ParseUnnamedTypeName(state)) {
     return ParseAbiTags(state);
   }
+
+  // DC <source-name>+ E
+  ParseState copy = state->parse_state;
+  if (ParseTwoCharToken(state, "DC") && OneOrMore(ParseSourceName, state) &&
+      ParseOneCharToken(state, 'E')) {
+    return true;
+  }
+  state->parse_state = copy;
+
+  // F <source-name>
+  // F <operator-name>
+  if (ParseOneCharToken(state, 'F') && MaybeAppend(state, "friend ") &&
+      (ParseSourceName(state) || ParseOperatorName(state, nullptr))) {
+    return true;
+  }
+  state->parse_state = copy;
+
   return false;
 }
 
@@ -824,7 +971,11 @@
 // <unnamed-type-name> ::= Ut [<(nonnegative) number>] _
 //                     ::= <closure-type-name>
 // <closure-type-name> ::= Ul <lambda-sig> E [<(nonnegative) number>] _
-// <lambda-sig>        ::= <(parameter) type>+
+// <lambda-sig>        ::= <template-param-decl>* <(parameter) type>+
+//
+// For <template-param-decl>* in <lambda-sig> see:
+//
+// https://github.com/itanium-cxx-abi/cxx-abi/issues/31
 static bool ParseUnnamedTypeName(State *state) {
   ComplexityGuard guard(state);
   if (guard.IsTooComplex()) return false;
@@ -847,6 +998,7 @@
   // Closure type.
   which = -1;
   if (ParseTwoCharToken(state, "Ul") && DisableAppend(state) &&
+      ZeroOrMore(ParseTemplateParamDecl, state) &&
       OneOrMore(ParseType, state) && RestoreAppend(state, copy.append) &&
       ParseOneCharToken(state, 'E') && Optional(ParseNumber(state, &which)) &&
       which <= std::numeric_limits<int>::max() - 2 &&  // Don't overflow.
@@ -888,6 +1040,7 @@
   }
   if (p != RemainingInput(state)) {  // Conversion succeeded.
     state->parse_state.mangled_idx += p - RemainingInput(state);
+    UpdateHighWaterMark(state);
     if (number_out != nullptr) {
       // Note: possibly truncate "number".
       *number_out = static_cast<int>(number);
@@ -910,6 +1063,7 @@
   }
   if (p != RemainingInput(state)) {  // Conversion succeeded.
     state->parse_state.mangled_idx += p - RemainingInput(state);
+    UpdateHighWaterMark(state);
     return true;
   }
   return false;
@@ -928,6 +1082,7 @@
   }
   if (p != RemainingInput(state)) {  // Conversion succeeded.
     state->parse_state.mangled_idx += p - RemainingInput(state);
+    UpdateHighWaterMark(state);
     return true;
   }
   return false;
@@ -946,11 +1101,13 @@
     MaybeAppendWithLength(state, RemainingInput(state), length);
   }
   state->parse_state.mangled_idx += length;
+  UpdateHighWaterMark(state);
   return true;
 }
 
 // <operator-name> ::= nw, and other two letters cases
 //                 ::= cv <type>  # (cast)
+//                 ::= li <source-name>  # C++11 user-defined literal
 //                 ::= v  <digit> <source-name> # vendor extended operator
 static bool ParseOperatorName(State *state, int *arity) {
   ComplexityGuard guard(state);
@@ -961,7 +1118,7 @@
   // First check with "cv" (cast) case.
   ParseState copy = state->parse_state;
   if (ParseTwoCharToken(state, "cv") && MaybeAppend(state, "operator ") &&
-      EnterNestedName(state) && ParseType(state) &&
+      EnterNestedName(state) && ParseConversionOperatorType(state) &&
       LeaveNestedName(state, copy.nest_level)) {
     if (arity != nullptr) {
       *arity = 1;
@@ -970,6 +1127,13 @@
   }
   state->parse_state = copy;
 
+  // Then user-defined literals.
+  if (ParseTwoCharToken(state, "li") && MaybeAppend(state, "operator\"\" ") &&
+      ParseSourceName(state)) {
+    return true;
+  }
+  state->parse_state = copy;
+
   // Then vendor extended operators.
   if (ParseOneCharToken(state, 'v') && ParseDigit(state, arity) &&
       ParseSourceName(state)) {
@@ -997,36 +1161,120 @@
       }
       MaybeAppend(state, p->real_name);
       state->parse_state.mangled_idx += 2;
+      UpdateHighWaterMark(state);
       return true;
     }
   }
   return false;
 }
 
+// <operator-name> ::= cv <type>  # (cast)
+//
+// The name of a conversion operator is the one place where cv-qualifiers, *, &,
+// and other simple type combinators are expected to appear in our stripped-down
+// demangling (elsewhere they appear in function signatures or template
+// arguments, which we omit from the output).  We make reasonable efforts to
+// render simple cases accurately.
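+//
+// As an illustration of the rescanning below: for "cv" followed by "PKi", the
+// prefixes P and K are scanned first, the base type i is emitted as "int", and
+// the reverse rescan appends " const" and then "*", yielding "int const*"
+// after the "operator " already emitted by ParseOperatorName.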
+static bool ParseConversionOperatorType(State *state) {
+  ComplexityGuard guard(state);
+  if (guard.IsTooComplex()) return false;
+  ParseState copy = state->parse_state;
+
+  // Scan pointers, const, and other easy mangling prefixes with postfix
+  // demanglings.  Remember the range of input for later rescanning.
+  //
+  // See `ParseType` and the `switch` below for the meaning of each char.
+  const char* begin_simple_prefixes = RemainingInput(state);
+  while (ParseCharClass(state, "OPRCGrVK")) {}
+  const char* end_simple_prefixes = RemainingInput(state);
+
+  // Emit the base type first.
+  if (!ParseType(state)) {
+    state->parse_state = copy;
+    return false;
+  }
+
+  // Then rescan the easy type combinators in reverse order to emit their
+  // demanglings in the expected output order.
+  while (begin_simple_prefixes != end_simple_prefixes) {
+    switch (*--end_simple_prefixes) {
+      case 'P':
+        MaybeAppend(state, "*");
+        break;
+      case 'R':
+        MaybeAppend(state, "&");
+        break;
+      case 'O':
+        MaybeAppend(state, "&&");
+        break;
+      case 'C':
+        MaybeAppend(state, " _Complex");
+        break;
+      case 'G':
+        MaybeAppend(state, " _Imaginary");
+        break;
+      case 'r':
+        MaybeAppend(state, " restrict");
+        break;
+      case 'V':
+        MaybeAppend(state, " volatile");
+        break;
+      case 'K':
+        MaybeAppend(state, " const");
+        break;
+    }
+  }
+  return true;
+}
+
 // <special-name> ::= TV <type>
 //                ::= TT <type>
 //                ::= TI <type>
 //                ::= TS <type>
-//                ::= TH <type>  # thread-local
+//                ::= TW <name>  # thread-local wrapper
+//                ::= TH <name>  # thread-local initialization
 //                ::= Tc <call-offset> <call-offset> <(base) encoding>
 //                ::= GV <(object) name>
+//                ::= GR <(object) name> [<seq-id>] _
 //                ::= T <call-offset> <(base) encoding>
+//                ::= GTt <encoding>  # transaction-safe entry point
+//                ::= TA <template-arg>  # nontype template parameter object
 // G++ extensions:
 //                ::= TC <type> <(offset) number> _ <(base) type>
 //                ::= TF <type>
 //                ::= TJ <type>
-//                ::= GR <name>
+//                ::= GR <name>  # without final _, perhaps an earlier form?
 //                ::= GA <encoding>
 //                ::= Th <call-offset> <(base) encoding>
 //                ::= Tv <call-offset> <(base) encoding>
 //
-// Note: we don't care much about them since they don't appear in
-// stack traces.  The are special data.
+// Note: Most of these are special data, not functions that occur in stack
+// traces.  Exceptions are TW and TH, which denote functions supporting the
+// thread_local feature.  For these see:
+//
+// https://maskray.me/blog/2021-02-14-all-about-thread-local-storage
+//
+// For TA see https://github.com/itanium-cxx-abi/cxx-abi/issues/63.
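+//
+// Illustrative example: _ZTW3var is demangled here as
+// "thread-local wrapper routine for var".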
 static bool ParseSpecialName(State *state) {
   ComplexityGuard guard(state);
   if (guard.IsTooComplex()) return false;
   ParseState copy = state->parse_state;
-  if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "VTISH") &&
+
+  if (ParseTwoCharToken(state, "TW")) {
+    MaybeAppend(state, "thread-local wrapper routine for ");
+    if (ParseName(state)) return true;
+    state->parse_state = copy;
+    return false;
+  }
+
+  if (ParseTwoCharToken(state, "TH")) {
+    MaybeAppend(state, "thread-local initialization routine for ");
+    if (ParseName(state)) return true;
+    state->parse_state = copy;
+    return false;
+  }
+
+  if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "VTIS") &&
       ParseType(state)) {
     return true;
   }
@@ -1064,12 +1312,30 @@
   }
   state->parse_state = copy;
 
-  if (ParseTwoCharToken(state, "GR") && ParseName(state)) {
+  // <special-name> ::= GR <(object) name> [<seq-id>] _  # modern standard
+  //                ::= GR <(object) name>  # also recognized
+  if (ParseTwoCharToken(state, "GR")) {
+    MaybeAppend(state, "reference temporary for ");
+    if (!ParseName(state)) {
+      state->parse_state = copy;
+      return false;
+    }
+    const bool has_seq_id = ParseSeqId(state);
+    const bool has_underscore = ParseOneCharToken(state, '_');
+    if (has_seq_id && !has_underscore) {
+      state->parse_state = copy;
+      return false;
+    }
+    return true;
+  }
+
+  if (ParseTwoCharToken(state, "GA") && ParseEncoding(state)) {
     return true;
   }
   state->parse_state = copy;
 
-  if (ParseTwoCharToken(state, "GA") && ParseEncoding(state)) {
+  if (ParseThreeCharToken(state, "GTt") &&
+      MaybeAppend(state, "transaction clone for ") && ParseEncoding(state)) {
     return true;
   }
   state->parse_state = copy;
@@ -1079,6 +1345,18 @@
     return true;
   }
   state->parse_state = copy;
+
+  if (ParseTwoCharToken(state, "TA")) {
+    bool append = state->parse_state.append;
+    DisableAppend(state);
+    if (ParseTemplateArg(state)) {
+      RestoreAppend(state, append);
+      MaybeAppend(state, "template parameter object");
+      return true;
+    }
+  }
+  state->parse_state = copy;
+
   return false;
 }
 
@@ -1182,7 +1460,6 @@
 //        ::= O <type>   # rvalue reference-to (C++0x)
 //        ::= C <type>   # complex pair (C 2000)
 //        ::= G <type>   # imaginary (C 2000)
-//        ::= U <source-name> <type>  # vendor extended type qualifier
 //        ::= <builtin-type>
 //        ::= <function-type>
 //        ::= <class-enum-type>  # note: just an alias for <name>
@@ -1193,7 +1470,9 @@
 //        ::= <decltype>
 //        ::= <substitution>
 //        ::= Dp <type>          # pack expansion of (C++0x)
-//        ::= Dv <num-elems> _   # GNU vector extension
+//        ::= Dv <(elements) number> _ <type>  # GNU vector extension
+//        ::= Dv <(bytes) expression> _ <type>
+//        ::= Dk <type-constraint>  # constrained auto
 //
 static bool ParseType(State *state) {
   ComplexityGuard guard(state);
@@ -1236,12 +1515,6 @@
   }
   state->parse_state = copy;
 
-  if (ParseOneCharToken(state, 'U') && ParseSourceName(state) &&
-      ParseType(state)) {
-    return true;
-  }
-  state->parse_state = copy;
-
   if (ParseBuiltinType(state) || ParseFunctionType(state) ||
       ParseClassEnumType(state) || ParseArrayType(state) ||
       ParsePointerToMemberType(state) || ParseDecltype(state) ||
@@ -1260,54 +1533,160 @@
     return true;
   }
 
+  // GNU vector extension Dv <number> _ <type>
   if (ParseTwoCharToken(state, "Dv") && ParseNumber(state, nullptr) &&
-      ParseOneCharToken(state, '_')) {
+      ParseOneCharToken(state, '_') && ParseType(state)) {
     return true;
   }
   state->parse_state = copy;
 
-  return false;
+  // GNU vector extension Dv <expression> _ <type>
+  if (ParseTwoCharToken(state, "Dv") && ParseExpression(state) &&
+      ParseOneCharToken(state, '_') && ParseType(state)) {
+    return true;
+  }
+  state->parse_state = copy;
+
+  if (ParseTwoCharToken(state, "Dk") && ParseTypeConstraint(state)) {
+    return true;
+  }
+  state->parse_state = copy;
+
+  // For this notation see CXXNameMangler::mangleType in Clang's source code.
+  // The relevant logic and its comment "not clear how to mangle this!" date
+  // from 2011, so it may be with us awhile.
+  return ParseLongToken(state, "_SUBSTPACK_");
 }
 
+// <qualifiers> ::= <extended-qualifier>* <CV-qualifiers>
 // <CV-qualifiers> ::= [r] [V] [K]
+//
 // We don't allow empty <CV-qualifiers> to avoid infinite loop in
 // ParseType().
 static bool ParseCVQualifiers(State *state) {
   ComplexityGuard guard(state);
   if (guard.IsTooComplex()) return false;
   int num_cv_qualifiers = 0;
+  while (ParseExtendedQualifier(state)) ++num_cv_qualifiers;
   num_cv_qualifiers += ParseOneCharToken(state, 'r');
   num_cv_qualifiers += ParseOneCharToken(state, 'V');
   num_cv_qualifiers += ParseOneCharToken(state, 'K');
   return num_cv_qualifiers > 0;
 }
 
+// <extended-qualifier> ::= U <source-name> [<template-args>]
+static bool ParseExtendedQualifier(State *state) {
+  ComplexityGuard guard(state);
+  if (guard.IsTooComplex()) return false;
+  ParseState copy = state->parse_state;
+
+  if (!ParseOneCharToken(state, 'U')) return false;
+
+  bool append = state->parse_state.append;
+  DisableAppend(state);
+  if (!ParseSourceName(state)) {
+    state->parse_state = copy;
+    return false;
+  }
+  Optional(ParseTemplateArgs(state));
+  RestoreAppend(state, append);
+  return true;
+}
+
 // <builtin-type> ::= v, etc.  # single-character builtin types
-//                ::= u <source-name>
+//                ::= <vendor-extended-type>
 //                ::= Dd, etc.  # two-character builtin types
+//                ::= DB (<number> | <expression>) _  # _BitInt(N)
+//                ::= DU (<number> | <expression>) _  # unsigned _BitInt(N)
+//                ::= DF <number> _  # _FloatN (N bits)
+//                ::= DF <number> x  # _FloatNx
+//                ::= DF16b  # std::bfloat16_t
 //
 // Not supported:
-//                ::= DF <number> _ # _FloatN (N bits)
-//
+//                ::= [DS] DA <fixed-point-size>
+//                ::= [DS] DR <fixed-point-size>
+// because real implementations of N1169 fixed-point are scant.
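+//
+// Illustrative readings of the grammar above: DB32_ demangles to _BitInt(32),
+// DU8_ to unsigned _BitInt(8), and DF64_ to _Float64.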
 static bool ParseBuiltinType(State *state) {
   ComplexityGuard guard(state);
   if (guard.IsTooComplex()) return false;
-  const AbbrevPair *p;
-  for (p = kBuiltinTypeList; p->abbrev != nullptr; ++p) {
+  ParseState copy = state->parse_state;
+
+  // DB (<number> | <expression>) _  # _BitInt(N)
+  // DU (<number> | <expression>) _  # unsigned _BitInt(N)
+  if (ParseTwoCharToken(state, "DB") ||
+      (ParseTwoCharToken(state, "DU") && MaybeAppend(state, "unsigned "))) {
+    bool append = state->parse_state.append;
+    DisableAppend(state);
+    int number = -1;
+    if (!ParseNumber(state, &number) && !ParseExpression(state)) {
+      state->parse_state = copy;
+      return false;
+    }
+    RestoreAppend(state, append);
+
+    if (!ParseOneCharToken(state, '_')) {
+      state->parse_state = copy;
+      return false;
+    }
+
+    MaybeAppend(state, "_BitInt(");
+    if (number >= 0) {
+      MaybeAppendDecimal(state, number);
+    } else {
+      MaybeAppend(state, "?");  // the best we can do for dependent sizes
+    }
+    MaybeAppend(state, ")");
+    return true;
+  }
+
+  // DF <number> _  # _FloatN
+  // DF <number> x  # _FloatNx
+  // DF16b  # std::bfloat16_t
+  if (ParseTwoCharToken(state, "DF")) {
+    if (ParseThreeCharToken(state, "16b")) {
+      MaybeAppend(state, "std::bfloat16_t");
+      return true;
+    }
+    int number = 0;
+    if (!ParseNumber(state, &number)) {
+      state->parse_state = copy;
+      return false;
+    }
+    MaybeAppend(state, "_Float");
+    MaybeAppendDecimal(state, number);
+    if (ParseOneCharToken(state, 'x')) {
+      MaybeAppend(state, "x");
+      return true;
+    }
+    if (ParseOneCharToken(state, '_')) return true;
+    state->parse_state = copy;
+    return false;
+  }
+
+  for (const AbbrevPair *p = kBuiltinTypeList; p->abbrev != nullptr; ++p) {
     // Guaranteed only 1- or 2-character strings in kBuiltinTypeList.
     if (p->abbrev[1] == '\0') {
       if (ParseOneCharToken(state, p->abbrev[0])) {
         MaybeAppend(state, p->real_name);
-        return true;
+        return true;  // ::= v, etc.  # single-character builtin types
       }
     } else if (p->abbrev[2] == '\0' && ParseTwoCharToken(state, p->abbrev)) {
       MaybeAppend(state, p->real_name);
-      return true;
+      return true;  // ::= Dd, etc.  # two-character builtin types
     }
   }
 
+  return ParseVendorExtendedType(state);
+}
+
+// <vendor-extended-type> ::= u <source-name> [<template-args>]
+static bool ParseVendorExtendedType(State *state) {
+  ComplexityGuard guard(state);
+  if (guard.IsTooComplex()) return false;
+
   ParseState copy = state->parse_state;
-  if (ParseOneCharToken(state, 'u') && ParseSourceName(state)) {
+  if (ParseOneCharToken(state, 'u') && ParseSourceName(state) &&
+      Optional(ParseTemplateArgs(state))) {
     return true;
   }
   state->parse_state = copy;
@@ -1342,28 +1721,44 @@
   return false;
 }
 
-// <function-type> ::= [exception-spec] F [Y] <bare-function-type> [O] E
+// <function-type> ::=
+//     [exception-spec] [Dx] F [Y] <bare-function-type> [<ref-qualifier>] E
+//
+// <ref-qualifier> ::= R | O
 static bool ParseFunctionType(State *state) {
   ComplexityGuard guard(state);
   if (guard.IsTooComplex()) return false;
   ParseState copy = state->parse_state;
-  if (Optional(ParseExceptionSpec(state)) && ParseOneCharToken(state, 'F') &&
-      Optional(ParseOneCharToken(state, 'Y')) && ParseBareFunctionType(state) &&
-      Optional(ParseOneCharToken(state, 'O')) &&
-      ParseOneCharToken(state, 'E')) {
-    return true;
+  Optional(ParseExceptionSpec(state));
+  Optional(ParseTwoCharToken(state, "Dx"));
+  if (!ParseOneCharToken(state, 'F')) {
+    state->parse_state = copy;
+    return false;
   }
-  state->parse_state = copy;
-  return false;
+  Optional(ParseOneCharToken(state, 'Y'));
+  if (!ParseBareFunctionType(state)) {
+    state->parse_state = copy;
+    return false;
+  }
+  Optional(ParseCharClass(state, "RO"));
+  if (!ParseOneCharToken(state, 'E')) {
+    state->parse_state = copy;
+    return false;
+  }
+  return true;
 }
 
-// <bare-function-type> ::= <(signature) type>+
+// <bare-function-type> ::= <overload-attribute>* <(signature) type>+
+//
+// The <overload-attribute>* prefix is nonstandard; see the comment on
+// ParseOverloadAttribute.
 static bool ParseBareFunctionType(State *state) {
   ComplexityGuard guard(state);
   if (guard.IsTooComplex()) return false;
   ParseState copy = state->parse_state;
   DisableAppend(state);
-  if (OneOrMore(ParseType, state)) {
+  if (ZeroOrMore(ParseOverloadAttribute, state) &&
+      OneOrMore(ParseType, state)) {
     RestoreAppend(state, copy.append);
     MaybeAppend(state, "()");
     return true;
@@ -1372,11 +1767,43 @@
   return false;
 }
 
+// <overload-attribute> ::= Ua <name>
+//
+// The nonstandard <overload-attribute> production is sufficient to accept the
+// current implementation of __attribute__((enable_if(condition, "message")))
+// and future attributes of a similar shape.  See
+// https://clang.llvm.org/docs/AttributeReference.html#enable-if and the
+// definition of CXXNameMangler::mangleFunctionEncodingBareType in Clang's
+// source code.
+static bool ParseOverloadAttribute(State *state) {
+  ComplexityGuard guard(state);
+  if (guard.IsTooComplex()) return false;
+  ParseState copy = state->parse_state;
+  if (ParseTwoCharToken(state, "Ua") && ParseName(state)) {
+    return true;
+  }
+  state->parse_state = copy;
+  return false;
+}
+
 // <class-enum-type> ::= <name>
+//                   ::= Ts <name>  # struct Name or class Name
+//                   ::= Tu <name>  # union Name
+//                   ::= Te <name>  # enum Name
+//
+// See http://shortn/_W3YrltiEd0.
 static bool ParseClassEnumType(State *state) {
   ComplexityGuard guard(state);
   if (guard.IsTooComplex()) return false;
-  return ParseName(state);
+  ParseState copy = state->parse_state;
+  if (Optional(ParseTwoCharToken(state, "Ts") ||
+               ParseTwoCharToken(state, "Tu") ||
+               ParseTwoCharToken(state, "Te")) &&
+      ParseName(state)) {
+    return true;
+  }
+  state->parse_state = copy;
+  return false;
 }
 
 // <array-type> ::= A <(positive dimension) number> _ <(element) type>
@@ -1413,21 +1840,83 @@
 
 // <template-param> ::= T_
 //                  ::= T <parameter-2 non-negative number> _
+//                  ::= TL <level-1> __
+//                  ::= TL <level-1> _ <parameter-2 non-negative number> _
 static bool ParseTemplateParam(State *state) {
   ComplexityGuard guard(state);
   if (guard.IsTooComplex()) return false;
   if (ParseTwoCharToken(state, "T_")) {
     MaybeAppend(state, "?");  // We don't support template substitutions.
-    return true;
+    return true;              // ::= T_
   }
 
   ParseState copy = state->parse_state;
   if (ParseOneCharToken(state, 'T') && ParseNumber(state, nullptr) &&
       ParseOneCharToken(state, '_')) {
     MaybeAppend(state, "?");  // We don't support template substitutions.
+    return true;              // ::= T <parameter-2 non-negative number> _
+  }
+  state->parse_state = copy;
+
+  if (ParseTwoCharToken(state, "TL") && ParseNumber(state, nullptr)) {
+    if (ParseTwoCharToken(state, "__")) {
+      MaybeAppend(state, "?");  // We don't support template substitutions.
+      return true;              // ::= TL <level-1> __
+    }
+
+    if (ParseOneCharToken(state, '_') && ParseNumber(state, nullptr) &&
+        ParseOneCharToken(state, '_')) {
+      MaybeAppend(state, "?");  // We don't support template substitutions.
+      return true;  // ::= TL <level-1> _ <parameter-2 non-negative number> _
+    }
+  }
+  state->parse_state = copy;
+  return false;
+}
+
+// <template-param-decl>
+//   ::= Ty                                  # template type parameter
+//   ::= Tk <concept name> [<template-args>] # constrained type parameter
+//   ::= Tn <type>                           # template non-type parameter
+//   ::= Tt <template-param-decl>* E         # template template parameter
+//   ::= Tp <template-param-decl>            # template parameter pack
+//
+// NOTE: <concept name> is just a <name>: http://shortn/_MqJVyr0fc1
+// TODO(b/324066279): Implement optional suffix for `Tt`:
+// [Q <requires-clause expr>]
+static bool ParseTemplateParamDecl(State *state) {
+  ComplexityGuard guard(state);
+  if (guard.IsTooComplex()) return false;
+  ParseState copy = state->parse_state;
+
+  if (ParseTwoCharToken(state, "Ty")) {
     return true;
   }
   state->parse_state = copy;
+
+  if (ParseTwoCharToken(state, "Tk") && ParseName(state) &&
+      Optional(ParseTemplateArgs(state))) {
+    return true;
+  }
+  state->parse_state = copy;
+
+  if (ParseTwoCharToken(state, "Tn") && ParseType(state)) {
+    return true;
+  }
+  state->parse_state = copy;
+
+  if (ParseTwoCharToken(state, "Tt") &&
+      ZeroOrMore(ParseTemplateParamDecl, state) &&
+      ParseOneCharToken(state, 'E')) {
+    return true;
+  }
+  state->parse_state = copy;
+
+  if (ParseTwoCharToken(state, "Tp") && ParseTemplateParamDecl(state)) {
+    return true;
+  }
+  state->parse_state = copy;
+
   return false;
 }
 
@@ -1441,13 +1930,14 @@
           ParseSubstitution(state, /*accept_std=*/false));
 }
 
-// <template-args> ::= I <template-arg>+ E
+// <template-args> ::= I <template-arg>+ [Q <requires-clause expr>] E
 static bool ParseTemplateArgs(State *state) {
   ComplexityGuard guard(state);
   if (guard.IsTooComplex()) return false;
   ParseState copy = state->parse_state;
   DisableAppend(state);
   if (ParseOneCharToken(state, 'I') && OneOrMore(ParseTemplateArg, state) &&
+      Optional(ParseQRequiresClauseExpr(state)) &&
       ParseOneCharToken(state, 'E')) {
     RestoreAppend(state, copy.append);
     MaybeAppend(state, "<>");
@@ -1457,7 +1947,8 @@
   return false;
 }
 
-// <template-arg>  ::= <type>
+// <template-arg>  ::= <template-param-decl> <template-arg>
+//                 ::= <type>
 //                 ::= <expr-primary>
 //                 ::= J <template-arg>* E        # argument pack
 //                 ::= X <expression> E
@@ -1541,7 +2032,7 @@
   //     ::= L <source-name> [<template-args>] [<expr-cast-value> E]
   if (ParseLocalSourceName(state) && Optional(ParseTemplateArgs(state))) {
     copy = state->parse_state;
-    if (ParseExprCastValue(state) && ParseOneCharToken(state, 'E')) {
+    if (ParseExprCastValueAndTrailingE(state)) {
       return true;
     }
     state->parse_state = copy;
@@ -1560,6 +2051,12 @@
     return true;
   }
   state->parse_state = copy;
+
+  if (ParseTemplateParamDecl(state) && ParseTemplateArg(state)) {
+    return true;
+  }
+  state->parse_state = copy;
+
   return false;
 }
 
@@ -1614,6 +2111,13 @@
 //                         <base-unresolved-name>
 //                   ::= [gs] sr <unresolved-qualifier-level>+ E
 //                         <base-unresolved-name>
+//                   ::= sr St <simple-id> <simple-id>  # nonstandard
+//
+// The last case is not part of the official grammar but has been observed in
+// real-world examples that the GNU demangler (but not the LLVM demangler) is
+// able to decode; see demangle_test.cc for one such symbol name.  The shape
+// sr St <simple-id> <simple-id> was inferred by closed-box testing of the GNU
+// demangler.
 static bool ParseUnresolvedName(State *state) {
   ComplexityGuard guard(state);
   if (guard.IsTooComplex()) return false;
@@ -1633,7 +2137,7 @@
 
   if (ParseTwoCharToken(state, "sr") && ParseOneCharToken(state, 'N') &&
       ParseUnresolvedType(state) &&
-      OneOrMore(/* <unresolved-qualifier-level> ::= */ ParseSimpleId, state) &&
+      OneOrMore(ParseUnresolvedQualifierLevel, state) &&
       ParseOneCharToken(state, 'E') && ParseBaseUnresolvedName(state)) {
     return true;
   }
@@ -1641,35 +2145,160 @@
 
   if (Optional(ParseTwoCharToken(state, "gs")) &&
       ParseTwoCharToken(state, "sr") &&
-      OneOrMore(/* <unresolved-qualifier-level> ::= */ ParseSimpleId, state) &&
+      OneOrMore(ParseUnresolvedQualifierLevel, state) &&
       ParseOneCharToken(state, 'E') && ParseBaseUnresolvedName(state)) {
     return true;
   }
   state->parse_state = copy;
 
+  if (ParseTwoCharToken(state, "sr") && ParseTwoCharToken(state, "St") &&
+      ParseSimpleId(state) && ParseSimpleId(state)) {
+    return true;
+  }
+  state->parse_state = copy;
+
   return false;
 }
 
-// <expression> ::= <1-ary operator-name> <expression>
-//              ::= <2-ary operator-name> <expression> <expression>
-//              ::= <3-ary operator-name> <expression> <expression> <expression>
-//              ::= cl <expression>+ E
-//              ::= cp <simple-id> <expression>* E # Clang-specific.
-//              ::= cv <type> <expression>      # type (expression)
-//              ::= cv <type> _ <expression>* E # type (expr-list)
-//              ::= st <type>
-//              ::= <template-param>
-//              ::= <function-param>
-//              ::= <expr-primary>
-//              ::= dt <expression> <unresolved-name> # expr.name
-//              ::= pt <expression> <unresolved-name> # expr->name
-//              ::= sp <expression>         # argument pack expansion
-//              ::= sr <type> <unqualified-name> <template-args>
-//              ::= sr <type> <unqualified-name>
+// <unresolved-qualifier-level> ::= <simple-id>
+//                              ::= <substitution> <template-args>
+//
+// The production <substitution> <template-args> is nonstandard but is observed
+// in practice.  An upstream discussion on the best shape of <unresolved-name>
+// has not converged:
+//
+// https://github.com/itanium-cxx-abi/cxx-abi/issues/38
+static bool ParseUnresolvedQualifierLevel(State *state) {
+  ComplexityGuard guard(state);
+  if (guard.IsTooComplex()) return false;
+
+  if (ParseSimpleId(state)) return true;
+
+  ParseState copy = state->parse_state;
+  if (ParseSubstitution(state, /*accept_std=*/false) &&
+      ParseTemplateArgs(state)) {
+    return true;
+  }
+  state->parse_state = copy;
+  return false;
+}
+
+// <union-selector> ::= _ [<number>]
+//
+// https://github.com/itanium-cxx-abi/cxx-abi/issues/47
+static bool ParseUnionSelector(State *state) {
+  return ParseOneCharToken(state, '_') && Optional(ParseNumber(state, nullptr));
+}
+
 // <function-param> ::= fp <(top-level) CV-qualifiers> _
 //                  ::= fp <(top-level) CV-qualifiers> <number> _
 //                  ::= fL <number> p <(top-level) CV-qualifiers> _
 //                  ::= fL <number> p <(top-level) CV-qualifiers> <number> _
+//                  ::= fpT  # this
+static bool ParseFunctionParam(State *state) {
+  ComplexityGuard guard(state);
+  if (guard.IsTooComplex()) return false;
+
+  ParseState copy = state->parse_state;
+
+  // Function-param expression (level 0).
+  if (ParseTwoCharToken(state, "fp") && Optional(ParseCVQualifiers(state)) &&
+      Optional(ParseNumber(state, nullptr)) && ParseOneCharToken(state, '_')) {
+    return true;
+  }
+  state->parse_state = copy;
+
+  // Function-param expression (level 1+).
+  if (ParseTwoCharToken(state, "fL") && Optional(ParseNumber(state, nullptr)) &&
+      ParseOneCharToken(state, 'p') && Optional(ParseCVQualifiers(state)) &&
+      Optional(ParseNumber(state, nullptr)) && ParseOneCharToken(state, '_')) {
+    return true;
+  }
+  state->parse_state = copy;
+
+  return ParseThreeCharToken(state, "fpT");
+}
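
For context, a minimal C++ sketch of where <function-param> tokens arise (illustrative only; plus_one is a hypothetical example, not part of this change): a parameter mentioned in a dependent signature, such as a decltype trailing return type, is encoded with fp/fL, and fpT stands for `this` in the same position.

    // Hypothetical example: the decltype below refers back to parameter t, so
    // the mangled return type contains an fp token that ParseFunctionParam
    // consumes; a member function mentioning `this` here would use fpT.
    template <typename T>
    auto plus_one(T t) -> decltype(t + 1) { return t + 1; }
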
+
+// <braced-expression> ::= <expression>
+//                     ::= di <field source-name> <braced-expression>
+//                     ::= dx <index expression> <braced-expression>
+//                     ::= dX <expression> <expression> <braced-expression>
+static bool ParseBracedExpression(State *state) {
+  ComplexityGuard guard(state);
+  if (guard.IsTooComplex()) return false;
+
+  ParseState copy = state->parse_state;
+
+  if (ParseTwoCharToken(state, "di") && ParseSourceName(state) &&
+      ParseBracedExpression(state)) {
+    return true;
+  }
+  state->parse_state = copy;
+
+  if (ParseTwoCharToken(state, "dx") && ParseExpression(state) &&
+      ParseBracedExpression(state)) {
+    return true;
+  }
+  state->parse_state = copy;
+
+  if (ParseTwoCharToken(state, "dX") &&
+      ParseExpression(state) && ParseExpression(state) &&
+      ParseBracedExpression(state)) {
+    return true;
+  }
+  state->parse_state = copy;
+
+  return ParseExpression(state);
+}
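
For context, a hedged sketch of source that can reach the di form (illustrative; Point and make are hypothetical names, and the exact tokens emitted depend on the toolchain): a braced-init-list in a dependent expression is wrapped in tl/il, and designated initializers contribute the di <field source-name> pieces.

    // Hypothetical C++20 example: the braced-init-list in the return type is
    // mangled as tl <type> <braced-expression>* E, and each designated
    // initializer may appear as di <field source-name> <braced-expression>.
    struct Point { int x; int y; };
    template <typename T>
    auto make(T v) -> decltype(Point{.x = v, .y = 0});
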
+
+// <expression> ::= <1-ary operator-name> <expression>
+//              ::= <2-ary operator-name> <expression> <expression>
+//              ::= <3-ary operator-name> <expression> <expression> <expression>
+//              ::= pp_ <expression>  # ++e; pp <expression> is e++
+//              ::= mm_ <expression>  # --e; mm <expression> is e--
+//              ::= cl <expression>+ E
+//              ::= cp <simple-id> <expression>* E # Clang-specific.
+//              ::= so <type> <expression> [<number>] <union-selector>* [p] E
+//              ::= cv <type> <expression>      # type (expression)
+//              ::= cv <type> _ <expression>* E # type (expr-list)
+//              ::= tl <type> <braced-expression>* E
+//              ::= il <braced-expression>* E
+//              ::= [gs] nw <expression>* _ <type> E
+//              ::= [gs] nw <expression>* _ <type> <initializer>
+//              ::= [gs] na <expression>* _ <type> E
+//              ::= [gs] na <expression>* _ <type> <initializer>
+//              ::= [gs] dl <expression>
+//              ::= [gs] da <expression>
+//              ::= dc <type> <expression>
+//              ::= sc <type> <expression>
+//              ::= cc <type> <expression>
+//              ::= rc <type> <expression>
+//              ::= ti <type>
+//              ::= te <expression>
+//              ::= st <type>
+//              ::= at <type>
+//              ::= az <expression>
+//              ::= nx <expression>
+//              ::= <template-param>
+//              ::= <function-param>
+//              ::= sZ <template-param>
+//              ::= sZ <function-param>
+//              ::= sP <template-arg>* E
+//              ::= <expr-primary>
+//              ::= dt <expression> <unresolved-name> # expr.name
+//              ::= pt <expression> <unresolved-name> # expr->name
+//              ::= sp <expression>         # argument pack expansion
+//              ::= fl <binary operator-name> <expression>
+//              ::= fr <binary operator-name> <expression>
+//              ::= fL <binary operator-name> <expression> <expression>
+//              ::= fR <binary operator-name> <expression> <expression>
+//              ::= tw <expression>
+//              ::= tr
+//              ::= sr <type> <unqualified-name> <template-args>
+//              ::= sr <type> <unqualified-name>
+//              ::= u <source-name> <template-arg>* E  # vendor extension
+//              ::= rq <requirement>+ E
+//              ::= rQ <bare-function-type> _ <requirement>+ E
 static bool ParseExpression(State *state) {
   ComplexityGuard guard(state);
   if (guard.IsTooComplex()) return false;
@@ -1686,6 +2315,15 @@
   }
   state->parse_state = copy;
 
+  // Preincrement and predecrement.  Postincrement and postdecrement are handled
+  // by the operator-name logic later on.
+  if ((ParseThreeCharToken(state, "pp_") ||
+       ParseThreeCharToken(state, "mm_")) &&
+      ParseExpression(state)) {
+    return true;
+  }
+  state->parse_state = copy;
+
   // Clang-specific "cp <simple-id> <expression>* E"
   //   https://clang.llvm.org/doxygen/ItaniumMangle_8cpp_source.html#l04338
   if (ParseTwoCharToken(state, "cp") && ParseSimpleId(state) &&
@@ -1694,17 +2332,65 @@
   }
   state->parse_state = copy;
 
-  // Function-param expression (level 0).
-  if (ParseTwoCharToken(state, "fp") && Optional(ParseCVQualifiers(state)) &&
-      Optional(ParseNumber(state, nullptr)) && ParseOneCharToken(state, '_')) {
+  // <expression> ::= so <type> <expression> [<number>] <union-selector>* [p] E
+  //
+  // https://github.com/itanium-cxx-abi/cxx-abi/issues/47
+  if (ParseTwoCharToken(state, "so") && ParseType(state) &&
+      ParseExpression(state) && Optional(ParseNumber(state, nullptr)) &&
+      ZeroOrMore(ParseUnionSelector, state) &&
+      Optional(ParseOneCharToken(state, 'p')) &&
+      ParseOneCharToken(state, 'E')) {
     return true;
   }
   state->parse_state = copy;
 
-  // Function-param expression (level 1+).
-  if (ParseTwoCharToken(state, "fL") && Optional(ParseNumber(state, nullptr)) &&
-      ParseOneCharToken(state, 'p') && Optional(ParseCVQualifiers(state)) &&
-      Optional(ParseNumber(state, nullptr)) && ParseOneCharToken(state, '_')) {
+  // <expression> ::= <function-param>
+  if (ParseFunctionParam(state)) return true;
+  state->parse_state = copy;
+
+  // <expression> ::= tl <type> <braced-expression>* E
+  if (ParseTwoCharToken(state, "tl") && ParseType(state) &&
+      ZeroOrMore(ParseBracedExpression, state) &&
+      ParseOneCharToken(state, 'E')) {
+    return true;
+  }
+  state->parse_state = copy;
+
+  // <expression> ::= il <braced-expression>* E
+  if (ParseTwoCharToken(state, "il") &&
+      ZeroOrMore(ParseBracedExpression, state) &&
+      ParseOneCharToken(state, 'E')) {
+    return true;
+  }
+  state->parse_state = copy;
+
+  // <expression> ::= [gs] nw <expression>* _ <type> E
+  //              ::= [gs] nw <expression>* _ <type> <initializer>
+  //              ::= [gs] na <expression>* _ <type> E
+  //              ::= [gs] na <expression>* _ <type> <initializer>
+  if (Optional(ParseTwoCharToken(state, "gs")) &&
+      (ParseTwoCharToken(state, "nw") || ParseTwoCharToken(state, "na")) &&
+      ZeroOrMore(ParseExpression, state) && ParseOneCharToken(state, '_') &&
+      ParseType(state) &&
+      (ParseOneCharToken(state, 'E') || ParseInitializer(state))) {
+    return true;
+  }
+  state->parse_state = copy;
+
+  // <expression> ::= [gs] dl <expression>
+  //              ::= [gs] da <expression>
+  if (Optional(ParseTwoCharToken(state, "gs")) &&
+      (ParseTwoCharToken(state, "dl") || ParseTwoCharToken(state, "da")) &&
+      ParseExpression(state)) {
+    return true;
+  }
+  state->parse_state = copy;
+
+  // dynamic_cast, static_cast, const_cast, reinterpret_cast.
+  //
+  // <expression> ::= (dc | sc | cc | rc) <type> <expression>
+  if (ParseCharClass(state, "dscr") && ParseOneCharToken(state, 'c') &&
+      ParseType(state) && ParseExpression(state)) {
     return true;
   }
   state->parse_state = copy;
@@ -1746,15 +2432,96 @@
   }
   state->parse_state = copy;
 
+  // typeid(type)
+  if (ParseTwoCharToken(state, "ti") && ParseType(state)) {
+    return true;
+  }
+  state->parse_state = copy;
+
+  // typeid(expression)
+  if (ParseTwoCharToken(state, "te") && ParseExpression(state)) {
+    return true;
+  }
+  state->parse_state = copy;
+
   // sizeof type
   if (ParseTwoCharToken(state, "st") && ParseType(state)) {
     return true;
   }
   state->parse_state = copy;
 
+  // alignof(type)
+  if (ParseTwoCharToken(state, "at") && ParseType(state)) {
+    return true;
+  }
+  state->parse_state = copy;
+
+  // alignof(expression), a GNU extension
+  if (ParseTwoCharToken(state, "az") && ParseExpression(state)) {
+    return true;
+  }
+  state->parse_state = copy;
+
+  // noexcept(expression) appearing as an expression in a dependent signature
+  if (ParseTwoCharToken(state, "nx") && ParseExpression(state)) {
+    return true;
+  }
+  state->parse_state = copy;
+
+  // sizeof...(pack)
+  //
+  // <expression> ::= sZ <template-param>
+  //              ::= sZ <function-param>
+  if (ParseTwoCharToken(state, "sZ") &&
+      (ParseFunctionParam(state) || ParseTemplateParam(state))) {
+    return true;
+  }
+  state->parse_state = copy;
+
+  // sizeof...(pack) captured from an alias template
+  //
+  // <expression> ::= sP <template-arg>* E
+  if (ParseTwoCharToken(state, "sP") && ZeroOrMore(ParseTemplateArg, state) &&
+      ParseOneCharToken(state, 'E')) {
+    return true;
+  }
+  state->parse_state = copy;
+
+  // Unary folds (... op pack) and (pack op ...).
+  //
+  // <expression> ::= fl <binary operator-name> <expression>
+  //              ::= fr <binary operator-name> <expression>
+  if ((ParseTwoCharToken(state, "fl") || ParseTwoCharToken(state, "fr")) &&
+      ParseOperatorName(state, nullptr) && ParseExpression(state)) {
+    return true;
+  }
+  state->parse_state = copy;
+
+  // Binary folds (init op ... op pack) and (pack op ... op init).
+  //
+  // <expression> ::= fL <binary operator-name> <expression> <expression>
+  //              ::= fR <binary operator-name> <expression> <expression>
+  if ((ParseTwoCharToken(state, "fL") || ParseTwoCharToken(state, "fR")) &&
+      ParseOperatorName(state, nullptr) && ParseExpression(state) &&
+      ParseExpression(state)) {
+    return true;
+  }
+  state->parse_state = copy;
+
+  // tw <expression>: throw e
+  if (ParseTwoCharToken(state, "tw") && ParseExpression(state)) {
+    return true;
+  }
+  state->parse_state = copy;
+
+  // tr: throw (rethrows an exception from the handler that caught it)
+  if (ParseTwoCharToken(state, "tr")) return true;
+
   // Object and pointer member access expressions.
+  //
+  // <expression> ::= (dt | pt) <expression> <unresolved-name>
   if ((ParseTwoCharToken(state, "dt") || ParseTwoCharToken(state, "pt")) &&
-      ParseExpression(state) && ParseType(state)) {
+      ParseExpression(state) && ParseUnresolvedName(state)) {
     return true;
   }
   state->parse_state = copy;
@@ -1774,9 +2541,61 @@
   }
   state->parse_state = copy;
 
+  // Vendor extended expressions
+  if (ParseOneCharToken(state, 'u') && ParseSourceName(state) &&
+      ZeroOrMore(ParseTemplateArg, state) && ParseOneCharToken(state, 'E')) {
+    return true;
+  }
+  state->parse_state = copy;
+
+  // <expression> ::= rq <requirement>+ E
+  //
+  // https://github.com/itanium-cxx-abi/cxx-abi/issues/24
+  if (ParseTwoCharToken(state, "rq") && OneOrMore(ParseRequirement, state) &&
+      ParseOneCharToken(state, 'E')) {
+    return true;
+  }
+  state->parse_state = copy;
+
+  // <expression> ::= rQ <bare-function-type> _ <requirement>+ E
+  //
+  // https://github.com/itanium-cxx-abi/cxx-abi/issues/24
+  if (ParseTwoCharToken(state, "rQ") && ParseBareFunctionType(state) &&
+      ParseOneCharToken(state, '_') && OneOrMore(ParseRequirement, state) &&
+      ParseOneCharToken(state, 'E')) {
+    return true;
+  }
+  state->parse_state = copy;
+
   return ParseUnresolvedName(state);
 }
 
+// <initializer> ::= pi <expression>* E
+//               ::= il <braced-expression>* E
+//
+// The il ... E form is not in the ABI spec but is seen in practice for
+// braced-init-lists in new-expressions, which are standard syntax from C++11
+// on.
+static bool ParseInitializer(State *state) {
+  ComplexityGuard guard(state);
+  if (guard.IsTooComplex()) return false;
+  ParseState copy = state->parse_state;
+
+  if (ParseTwoCharToken(state, "pi") && ZeroOrMore(ParseExpression, state) &&
+      ParseOneCharToken(state, 'E')) {
+    return true;
+  }
+  state->parse_state = copy;
+
+  if (ParseTwoCharToken(state, "il") &&
+      ZeroOrMore(ParseBracedExpression, state) &&
+      ParseOneCharToken(state, 'E')) {
+    return true;
+  }
+  state->parse_state = copy;
+  return false;
+}
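
For context, a short sketch of new-expressions whose manglings carry an <initializer> (illustrative only; make_paren and make_brace are hypothetical names):

    // Hypothetical examples: a new-expression in a dependent signature is
    // mangled as [gs] nw <expression>* _ <type> <initializer>; pi wraps the
    // parenthesized arguments and il wraps a braced-init-list.
    template <typename T>
    auto make_paren(T v) -> decltype(new T(v));   // ... pi <expression> E
    template <typename T>
    auto make_brace(T v) -> decltype(new T{v});   // ... il <braced-expression> E
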
+
 // <expr-primary> ::= L <type> <(value) number> E
 //                ::= L <type> <(value) float> E
 //                ::= L <mangled-name> E
@@ -1819,10 +2638,35 @@
     return false;
   }
 
-  // The merged cast production.
-  if (ParseOneCharToken(state, 'L') && ParseType(state) &&
-      ParseExprCastValue(state)) {
-    return true;
+  if (ParseOneCharToken(state, 'L')) {
+    // There are two special cases in which a literal may or must contain a type
+    // without a value.  The first is that both LDnE and LDn0E are valid
+    // encodings of nullptr, used in different situations.  Recognize LDnE here,
+    // leaving LDn0E to be recognized by the general logic afterward.
+    if (ParseThreeCharToken(state, "DnE")) return true;
+
+    // The second special case is a string literal, currently mangled in C++98
+    // style as LA<length + 1>_KcE.  This is inadequate to support C++11 and
+    // later versions, and the discussion of this problem has not converged.
+    //
+    // https://github.com/itanium-cxx-abi/cxx-abi/issues/64
+    //
+    // For now the bare-type mangling is what's used in practice, so we
+    // recognize this form and only this form if an array type appears here.
+    // Someday we'll probably have to accept a new form of value mangling in
+    // LA...E constructs.  (Note also that C++20 allows a wide range of
+    // class-type objects as template arguments, so someday their values will be
+    // mangled and we'll have to recognize them here too.)
+    if (RemainingInput(state)[0] == 'A' /* an array type follows */) {
+      if (ParseType(state) && ParseOneCharToken(state, 'E')) return true;
+      state->parse_state = copy;
+      return false;
+    }
+
+    // The merged cast production.
+    if (ParseType(state) && ParseExprCastValueAndTrailingE(state)) {
+      return true;
+    }
   }
   state->parse_state = copy;
 
@@ -1836,7 +2680,7 @@
 }
 
 // <number> or <float>, followed by 'E', as described above ParseExprPrimary.
-static bool ParseExprCastValue(State *state) {
+static bool ParseExprCastValueAndTrailingE(State *state) {
   ComplexityGuard guard(state);
   if (guard.IsTooComplex()) return false;
   // We have to be able to backtrack after accepting a number because we could
@@ -1848,39 +2692,148 @@
   }
   state->parse_state = copy;
 
-  if (ParseFloatNumber(state) && ParseOneCharToken(state, 'E')) {
-    return true;
+  if (ParseFloatNumber(state)) {
+    // <float> for ordinary floating-point types
+    if (ParseOneCharToken(state, 'E')) return true;
+
+    // <float> _ <float> for complex floating-point types
+    if (ParseOneCharToken(state, '_') && ParseFloatNumber(state) &&
+        ParseOneCharToken(state, 'E')) {
+      return true;
+    }
   }
   state->parse_state = copy;
 
   return false;
 }
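
As a concrete anchor for the cast-value grammar, the common case is an integer non-type template argument (illustrative sketch; Tag is a hypothetical name):

    // Hypothetical example: Tag<42> mangles its argument as the literal Li42E,
    // i.e. L <type 'i'> <number 42> E, which ParseExprPrimary hands to
    // ParseExprCastValueAndTrailingE.
    template <int N> struct Tag {};
    void use(Tag<42>);  // parameter type appears as 3TagILi42EE in the mangled name
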
 
+// Parses `Q <requires-clause expr>`.
+// If parsing fails, applies backtracking to `state`.
+//
+// This function covers two symbols instead of one for convenience,
+// because in LLVM's Itanium ABI mangling grammar, <requires-clause expr>
+// always appears after Q.
+//
+// Does not emit the parsed `requires` clause to simplify the implementation.
+// In other words, these two functions' mangled names will demangle identically:
+//
+// template <typename T>
+// int foo(T) requires IsIntegral<T>;
+//
+// vs.
+//
+// template <typename T>
+// int foo(T);
+static bool ParseQRequiresClauseExpr(State *state) {
+  ComplexityGuard guard(state);
+  if (guard.IsTooComplex()) return false;
+  ParseState copy = state->parse_state;
+  DisableAppend(state);
+
+  // <requires-clause expr> is just an <expression>: http://shortn/_9E1Ul0rIM8
+  if (ParseOneCharToken(state, 'Q') && ParseExpression(state)) {
+    RestoreAppend(state, copy.append);
+    return true;
+  }
+
+  // also restores append
+  state->parse_state = copy;
+  return false;
+}
+
+// <requirement> ::= X <expression> [N] [R <type-constraint>]
+// <requirement> ::= T <type>
+// <requirement> ::= Q <constraint-expression>
+//
+// <constraint-expression> ::= <expression>
+//
+// https://github.com/itanium-cxx-abi/cxx-abi/issues/24
+static bool ParseRequirement(State *state) {
+  ComplexityGuard guard(state);
+  if (guard.IsTooComplex()) return false;
+
+  ParseState copy = state->parse_state;
+
+  if (ParseOneCharToken(state, 'X') && ParseExpression(state) &&
+      Optional(ParseOneCharToken(state, 'N')) &&
+      // This logic backtracks cleanly if we eat an R but a valid type doesn't
+      // follow it.
+      (!ParseOneCharToken(state, 'R') || ParseTypeConstraint(state))) {
+    return true;
+  }
+  state->parse_state = copy;
+
+  if (ParseOneCharToken(state, 'T') && ParseType(state)) return true;
+  state->parse_state = copy;
+
+  if (ParseOneCharToken(state, 'Q') && ParseExpression(state)) return true;
+  state->parse_state = copy;
+
+  return false;
+}
+
+// <type-constraint> ::= <name>
+static bool ParseTypeConstraint(State *state) {
+  return ParseName(state);
+}
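
For context, a hedged C++20 sketch of source that can produce these requirement encodings (illustrative; whether a given toolchain mangles the clause at all depends on its ABI version):

    // Hypothetical example: the requires-expression is encoded as
    // rq <requirement>+ E when it appears in a mangled dependent signature.
    template <typename T>
    void g(T v) requires requires {
      v + v;                   // simple requirement -> X <expression>
      typename T::value_type;  // type requirement   -> T <type>
    };
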
+
 // <local-name> ::= Z <(function) encoding> E <(entity) name> [<discriminator>]
 //              ::= Z <(function) encoding> E s [<discriminator>]
+//              ::= Z <(function) encoding> E d [<(parameter) number>] _ <name>
 //
 // Parsing a common prefix of these productions together avoids an
 // exponential blowup of backtracking.  Parse like:
 //   <local-name> := Z <encoding> E <local-name-suffix>
 //   <local-name-suffix> ::= s [<discriminator>]
+//                       ::= d [<(parameter) number>] _ <name>
 //                       ::= <name> [<discriminator>]
 
 static bool ParseLocalNameSuffix(State *state) {
   ComplexityGuard guard(state);
   if (guard.IsTooComplex()) return false;
+  ParseState copy = state->parse_state;
 
+  // <local-name-suffix> ::= d [<(parameter) number>] _ <name>
+  if (ParseOneCharToken(state, 'd') &&
+      (IsDigit(RemainingInput(state)[0]) || RemainingInput(state)[0] == '_')) {
+    int number = -1;
+    Optional(ParseNumber(state, &number));
+    if (number < -1 || number > 2147483645) {
+      // Work around overflow cases.  We do not expect these outside of a fuzzer
+      // or other source of adversarial input.  If we do detect overflow here,
+      // we'll print {default arg#1}.
+      number = -1;
+    }
+    number += 2;
+
+    // The ::{default arg#1}:: infix must be rendered before the lambda itself,
+    // so print this before parsing the rest of the <local-name-suffix>.
+    MaybeAppend(state, "::{default arg#");
+    MaybeAppendDecimal(state, number);
+    MaybeAppend(state, "}::");
+    if (ParseOneCharToken(state, '_') && ParseName(state)) return true;
+
+    // On late parse failure, roll back not only the input but also the output,
+    // whose trailing NUL was overwritten.
+    state->parse_state = copy;
+    if (state->parse_state.append) {
+      state->out[state->parse_state.out_cur_idx] = '\0';
+    }
+    return false;
+  }
+  state->parse_state = copy;
+
+  // <local-name-suffix> ::= <name> [<discriminator>]
   if (MaybeAppend(state, "::") && ParseName(state) &&
       Optional(ParseDiscriminator(state))) {
     return true;
   }
-
-  // Since we're not going to overwrite the above "::" by re-parsing the
-  // <encoding> (whose trailing '\0' byte was in the byte now holding the
-  // first ':'), we have to rollback the "::" if the <name> parse failed.
+  state->parse_state = copy;
   if (state->parse_state.append) {
-    state->out[state->parse_state.out_cur_idx - 2] = '\0';
+    state->out[state->parse_state.out_cur_idx] = '\0';
   }
 
+  // <local-name-suffix> ::= s [<discriminator>]
   return ParseOneCharToken(state, 's') && Optional(ParseDiscriminator(state));
 }
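
For context, a sketch of the kind of source that exercises the new d suffix (illustrative; takes_default is a hypothetical name):

    // Hypothetical example: entities defined inside a default argument, such
    // as this lambda's call operator, are mangled with
    // Z <encoding> E d [<(parameter) number>] _ <name> and now demangle with a
    // ::{default arg#1}:: infix rather than failing.
    void takes_default(int x = [] { return 42; }());
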
 
@@ -1896,12 +2849,22 @@
   return false;
 }
 
-// <discriminator> := _ <(non-negative) number>
+// <discriminator> := _ <digit>
+//                 := __ <number (>= 10)> _
 static bool ParseDiscriminator(State *state) {
   ComplexityGuard guard(state);
   if (guard.IsTooComplex()) return false;
   ParseState copy = state->parse_state;
-  if (ParseOneCharToken(state, '_') && ParseNumber(state, nullptr)) {
+
+  // Both forms start with _ so parse that first.
+  if (!ParseOneCharToken(state, '_')) return false;
+
+  // <digit>
+  if (ParseDigit(state, nullptr)) return true;
+
+  // _ <number> _
+  if (ParseOneCharToken(state, '_') && ParseNumber(state, nullptr) &&
+      ParseOneCharToken(state, '_')) {
     return true;
   }
   state->parse_state = copy;
@@ -1947,6 +2910,7 @@
           MaybeAppend(state, p->real_name);
         }
         ++state->parse_state.mangled_idx;
+        UpdateHighWaterMark(state);
         return true;
       }
     }
@@ -1972,10 +2936,13 @@
         MaybeAppend(state, RemainingInput(state));
         return true;
       }
+      ReportHighWaterMark(state);
       return false;  // Unconsumed suffix.
     }
     return true;
   }
+
+  ReportHighWaterMark(state);
   return false;
 }
 
@@ -1985,6 +2952,10 @@
 
 // The demangler entry point.
 bool Demangle(const char* mangled, char* out, size_t out_size) {
+  if (mangled[0] == '_' && mangled[1] == 'R') {
+    return DemangleRustSymbolEncoding(mangled, out, out_size);
+  }
+
   State state;
   InitState(&state, mangled, out, out_size);
   return ParseTopLevelMangledName(&state) && !Overflowed(&state) &&
diff --git a/absl/debugging/internal/demangle.h b/absl/debugging/internal/demangle.h
index 146d115..cb0aba1 100644
--- a/absl/debugging/internal/demangle.h
+++ b/absl/debugging/internal/demangle.h
@@ -56,6 +56,9 @@
 //
 // See the unit test for more examples.
 //
+// Demangle also recognizes Rust mangled names by delegating the parsing of
+// anything that starts with _R to DemangleRustSymbolEncoding (demangle_rust.h).
+//
 // Note: we might want to write demanglers for ABIs other than Itanium
 // C++ ABI in the future.
 bool Demangle(const char* mangled, char* out, size_t out_size);
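
A small usage sketch of the new dispatch (illustrative only; the symbol is the example quoted in demangle_rust.h below):

    #include <cstdio>

    #include "absl/debugging/internal/demangle.h"

    int main() {
      char out[128];
      // _R-prefixed input is forwarded to DemangleRustSymbolEncoding.
      if (absl::debugging_internal::Demangle("_RNvC8my_crate7my_func", out,
                                             sizeof(out))) {
        std::printf("%s\n", out);  // expected output: my_crate::my_func
      }
      return 0;
    }
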
diff --git a/absl/debugging/internal/demangle_rust.cc b/absl/debugging/internal/demangle_rust.cc
new file mode 100644
index 0000000..4309bd8
--- /dev/null
+++ b/absl/debugging/internal/demangle_rust.cc
@@ -0,0 +1,925 @@
+// Copyright 2024 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/debugging/internal/demangle_rust.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <limits>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/debugging/internal/decode_rust_punycode.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+
+namespace {
+
+// Same step limit as the C++ demangler in demangle.cc uses.
+constexpr int kMaxReturns = 1 << 17;
+
+bool IsDigit(char c) { return '0' <= c && c <= '9'; }
+bool IsLower(char c) { return 'a' <= c && c <= 'z'; }
+bool IsUpper(char c) { return 'A' <= c && c <= 'Z'; }
+bool IsAlpha(char c) { return IsLower(c) || IsUpper(c); }
+bool IsIdentifierChar(char c) { return IsAlpha(c) || IsDigit(c) || c == '_'; }
+bool IsLowerHexDigit(char c) { return IsDigit(c) || ('a' <= c && c <= 'f'); }
+
+const char* BasicTypeName(char c) {
+  switch (c) {
+    case 'a': return "i8";
+    case 'b': return "bool";
+    case 'c': return "char";
+    case 'd': return "f64";
+    case 'e': return "str";
+    case 'f': return "f32";
+    case 'h': return "u8";
+    case 'i': return "isize";
+    case 'j': return "usize";
+    case 'l': return "i32";
+    case 'm': return "u32";
+    case 'n': return "i128";
+    case 'o': return "u128";
+    case 'p': return "_";
+    case 's': return "i16";
+    case 't': return "u16";
+    case 'u': return "()";
+    case 'v': return "...";
+    case 'x': return "i64";
+    case 'y': return "u64";
+    case 'z': return "!";
+  }
+  return nullptr;
+}
+
+// Parser for Rust symbol mangling v0, whose grammar is defined here:
+//
+// https://doc.rust-lang.org/rustc/symbol-mangling/v0.html#symbol-grammar-summary
+class RustSymbolParser {
+ public:
+  // Prepares to demangle the given encoding, a Rust symbol name starting with
+  // _R, into the output buffer [out, out_end).  The caller is expected to
+  // continue by calling the new object's Parse function.
+  RustSymbolParser(const char* encoding, char* out, char* const out_end)
+      : encoding_(encoding), out_(out), out_end_(out_end) {
+    if (out_ != out_end_) *out_ = '\0';
+  }
+
+  // Parses the constructor's encoding argument, writing output into the range
+  // [out, out_end).  Returns true on success and false for input whose
+  // structure was not recognized or exceeded implementation limits, such as by
+  // nesting structures too deep.  In either case *this should not be used
+  // again.
+  ABSL_MUST_USE_RESULT bool Parse() && {
+    // Recursively parses the grammar production named by callee, then resumes
+    // execution at the next statement.
+    //
+    // Recursive-descent parsing is a beautifully readable translation of a
+    // grammar, but it risks stack overflow if implemented by naive recursion on
+    // the C++ call stack.  So we simulate recursion by goto and switch instead,
+    // keeping a bounded stack of "return addresses" in the recursion_stack_
+    // member.
+    //
+    // The callee argument is a statement label.  We goto that label after
+    // saving the "return address" on recursion_stack_.  The next continue
+    // statement in the for loop below "returns" from this "call".
+    //
+    // The caller argument names the return point.  Each value of caller must
+    // appear in only one ABSL_DEMANGLER_RECURSE call and be listed in the
+    // definition of enum ReturnAddress.  The switch implements the control
+    // transfer from the end of a "called" subroutine back to the statement
+    // after the "call".
+    //
+    // Note that not all the grammar productions have to be packed into the
+    // switch, but only those which appear in a cycle in the grammar.  Anything
+    // acyclic can be written as ordinary functions and function calls, e.g.,
+    // ParseIdentifier.
+#define ABSL_DEMANGLER_RECURSE(callee, caller) \
+    do { \
+      if (recursion_depth_ == kStackSize) return false; \
+      /* The next continue will switch on this saved value ... */ \
+      recursion_stack_[recursion_depth_++] = caller; \
+      goto callee; \
+      /* ... and will land here, resuming the suspended code. */ \
+      case caller: {} \
+    } while (0)
+
+    // Parse the encoding, counting completed recursive calls to guard against
+    // excessively complex input and infinite-loop bugs.
+    int iter = 0;
+    goto whole_encoding;
+    for (; iter < kMaxReturns && recursion_depth_ > 0; ++iter) {
+      // This switch resumes the code path most recently suspended by
+      // ABSL_DEMANGLER_RECURSE.
+      switch (recursion_stack_[--recursion_depth_]) {
+        //
+        // symbol-name ->
+        // _R decimal-number? path instantiating-crate? vendor-specific-suffix?
+        whole_encoding:
+          if (!Eat('_') || !Eat('R')) return false;
+          // decimal-number? is always empty today, so proceed to path, which
+          // can't start with a decimal digit.
+          ABSL_DEMANGLER_RECURSE(path, kInstantiatingCrate);
+          if (IsAlpha(Peek())) {
+            ++silence_depth_;  // Print nothing more from here on.
+            ABSL_DEMANGLER_RECURSE(path, kVendorSpecificSuffix);
+          }
+          switch (Take()) {
+            case '.': case '$': case '\0': return true;
+          }
+          return false;  // unexpected trailing content
+
+        // path -> crate-root | inherent-impl | trait-impl | trait-definition |
+        //         nested-path | generic-args | backref
+        //
+        // Note that ABSL_DEMANGLER_RECURSE does not work inside a nested switch
+        // (which would hide the generated case label).  Thus we jump out of the
+        // inner switch with gotos before performing any fake recursion.
+        path:
+          switch (Take()) {
+            case 'C': goto crate_root;
+            case 'M': goto inherent_impl;
+            case 'X': goto trait_impl;
+            case 'Y': goto trait_definition;
+            case 'N': goto nested_path;
+            case 'I': goto generic_args;
+            case 'B': goto path_backref;
+            default: return false;
+          }
+
+        // crate-root -> C identifier (C consumed above)
+        crate_root:
+          if (!ParseIdentifier()) return false;
+          continue;
+
+        // inherent-impl -> M impl-path type (M already consumed)
+        inherent_impl:
+          if (!Emit("<")) return false;
+          ABSL_DEMANGLER_RECURSE(impl_path, kInherentImplType);
+          ABSL_DEMANGLER_RECURSE(type, kInherentImplEnding);
+          if (!Emit(">")) return false;
+          continue;
+
+        // trait-impl -> X impl-path type path (X already consumed)
+        trait_impl:
+          if (!Emit("<")) return false;
+          ABSL_DEMANGLER_RECURSE(impl_path, kTraitImplType);
+          ABSL_DEMANGLER_RECURSE(type, kTraitImplInfix);
+          if (!Emit(" as ")) return false;
+          ABSL_DEMANGLER_RECURSE(path, kTraitImplEnding);
+          if (!Emit(">")) return false;
+          continue;
+
+        // impl-path -> disambiguator? path (but never print it!)
+        impl_path:
+          ++silence_depth_;
+          {
+            int ignored_disambiguator;
+            if (!ParseDisambiguator(ignored_disambiguator)) return false;
+          }
+          ABSL_DEMANGLER_RECURSE(path, kImplPathEnding);
+          --silence_depth_;
+          continue;
+
+        // trait-definition -> Y type path (Y already consumed)
+        trait_definition:
+          if (!Emit("<")) return false;
+          ABSL_DEMANGLER_RECURSE(type, kTraitDefinitionInfix);
+          if (!Emit(" as ")) return false;
+          ABSL_DEMANGLER_RECURSE(path, kTraitDefinitionEnding);
+          if (!Emit(">")) return false;
+          continue;
+
+        // nested-path -> N namespace path identifier (N already consumed)
+        // namespace -> lower | upper
+        nested_path:
+          // Uppercase namespaces must be saved on a stack so we can print
+          // ::{closure#0} or ::{shim:vtable#0} or ::{X:name#0} as needed.
+          if (IsUpper(Peek())) {
+            if (!PushNamespace(Take())) return false;
+            ABSL_DEMANGLER_RECURSE(path, kIdentifierInUppercaseNamespace);
+            if (!Emit("::")) return false;
+            if (!ParseIdentifier(PopNamespace())) return false;
+            continue;
+          }
+
+          // Lowercase namespaces, however, are never represented in the output;
+          // they all emit just ::name.
+          if (IsLower(Take())) {
+            ABSL_DEMANGLER_RECURSE(path, kIdentifierInLowercaseNamespace);
+            if (!Emit("::")) return false;
+            if (!ParseIdentifier()) return false;
+            continue;
+          }
+
+          // Neither upper nor lower
+          return false;
+
+        // type -> basic-type | array-type | slice-type | tuple-type |
+        //         ref-type | mut-ref-type | const-ptr-type | mut-ptr-type |
+        //         fn-type | dyn-trait-type | path | backref
+        //
+        // We use ifs instead of switch (Take()) because the default case jumps
+        // to path, which will need to see the first character not yet Taken
+        // from the input.  Because we do not use a nested switch here,
+        // ABSL_DEMANGLER_RECURSE works fine in the 'S' case.
+        type:
+          if (IsLower(Peek())) {
+            const char* type_name = BasicTypeName(Take());
+            if (type_name == nullptr || !Emit(type_name)) return false;
+            continue;
+          }
+          if (Eat('A')) {
+            // array-type = A type const
+            if (!Emit("[")) return false;
+            ABSL_DEMANGLER_RECURSE(type, kArraySize);
+            if (!Emit("; ")) return false;
+            ABSL_DEMANGLER_RECURSE(constant, kFinishArray);
+            if (!Emit("]")) return false;
+            continue;
+          }
+          if (Eat('S')) {
+            if (!Emit("[")) return false;
+            ABSL_DEMANGLER_RECURSE(type, kSliceEnding);
+            if (!Emit("]")) return false;
+            continue;
+          }
+          if (Eat('T')) goto tuple_type;
+          if (Eat('R')) {
+            if (!Emit("&")) return false;
+            if (!ParseOptionalLifetime()) return false;
+            goto type;
+          }
+          if (Eat('Q')) {
+            if (!Emit("&mut ")) return false;
+            if (!ParseOptionalLifetime()) return false;
+            goto type;
+          }
+          if (Eat('P')) {
+            if (!Emit("*const ")) return false;
+            goto type;
+          }
+          if (Eat('O')) {
+            if (!Emit("*mut ")) return false;
+            goto type;
+          }
+          if (Eat('F')) goto fn_type;
+          if (Eat('D')) goto dyn_trait_type;
+          if (Eat('B')) goto type_backref;
+          goto path;
+
+        // tuple-type -> T type* E (T already consumed)
+        tuple_type:
+          if (!Emit("(")) return false;
+
+          // The toolchain should call the unit type u instead of TE, but the
+          // grammar and other demanglers also recognize TE, so we do too.
+          if (Eat('E')) {
+            if (!Emit(")")) return false;
+            continue;
+          }
+
+          // A tuple with one element is rendered (type,) instead of (type).
+          ABSL_DEMANGLER_RECURSE(type, kAfterFirstTupleElement);
+          if (Eat('E')) {
+            if (!Emit(",)")) return false;
+            continue;
+          }
+
+          // A tuple with two elements is of course (x, y).
+          if (!Emit(", ")) return false;
+          ABSL_DEMANGLER_RECURSE(type, kAfterSecondTupleElement);
+          if (Eat('E')) {
+            if (!Emit(")")) return false;
+            continue;
+          }
+
+          // And (x, y, z) for three elements.
+          if (!Emit(", ")) return false;
+          ABSL_DEMANGLER_RECURSE(type, kAfterThirdTupleElement);
+          if (Eat('E')) {
+            if (!Emit(")")) return false;
+            continue;
+          }
+
+          // For longer tuples we write (x, y, z, ...), printing none of the
+          // content of the fourth and later types.  Thus we avoid exhausting
+          // output buffers and human readers' patience when some library has a
+          // long tuple as an implementation detail, without having to
+          // completely obfuscate all tuples.
+          if (!Emit(", ...)")) return false;
+          ++silence_depth_;
+          while (!Eat('E')) {
+            ABSL_DEMANGLER_RECURSE(type, kAfterSubsequentTupleElement);
+          }
+          --silence_depth_;
+          continue;
+
+        // fn-type -> F fn-sig (F already consumed)
+        // fn-sig -> binder? U? (K abi)? type* E type
+        // abi -> C | undisambiguated-identifier
+        //
+        // We follow the C++ demangler in suppressing details of function
+        // signatures.  Every function type is rendered "fn...".
+        fn_type:
+          if (!Emit("fn...")) return false;
+          ++silence_depth_;
+          if (!ParseOptionalBinder()) return false;
+          (void)Eat('U');
+          if (Eat('K')) {
+            if (!Eat('C') && !ParseUndisambiguatedIdentifier()) return false;
+          }
+          while (!Eat('E')) {
+            ABSL_DEMANGLER_RECURSE(type, kContinueParameterList);
+          }
+          ABSL_DEMANGLER_RECURSE(type, kFinishFn);
+          --silence_depth_;
+          continue;
+
+        // dyn-trait-type -> D dyn-bounds lifetime (D already consumed)
+        // dyn-bounds -> binder? dyn-trait* E
+        //
+        // The grammar strangely allows an empty trait list, even though the
+        // compiler should never output one.  We follow existing demanglers in
+        // rendering DEL_ as "dyn ".
+        //
+        // Because auto traits lengthen a type name considerably without
+        // providing much value to a search for related source code, it would be
+        // desirable to abbreviate
+        //     dyn main::Trait + std::marker::Copy + std::marker::Send
+        // to
+        //     dyn main::Trait + ...,
+        // eliding the auto traits.  But it is difficult to do so correctly, in
+        // part because there is no guarantee that the mangling will list the
+        // main trait first.  So we just print all the traits in their order of
+        // appearance in the mangled name.
+        dyn_trait_type:
+          if (!Emit("dyn ")) return false;
+          if (!ParseOptionalBinder()) return false;
+          if (!Eat('E')) {
+            ABSL_DEMANGLER_RECURSE(dyn_trait, kBeginAutoTraits);
+            while (!Eat('E')) {
+              if (!Emit(" + ")) return false;
+              ABSL_DEMANGLER_RECURSE(dyn_trait, kContinueAutoTraits);
+            }
+          }
+          if (!ParseRequiredLifetime()) return false;
+          continue;
+
+        // dyn-trait -> path dyn-trait-assoc-binding*
+        // dyn-trait-assoc-binding -> p undisambiguated-identifier type
+        //
+        // We render nonempty binding lists as <>, omitting their contents as
+        // for generic-args.
+        dyn_trait:
+          ABSL_DEMANGLER_RECURSE(path, kContinueDynTrait);
+          if (Peek() == 'p') {
+            if (!Emit("<>")) return false;
+            ++silence_depth_;
+            while (Eat('p')) {
+              if (!ParseUndisambiguatedIdentifier()) return false;
+              ABSL_DEMANGLER_RECURSE(type, kContinueAssocBinding);
+            }
+            --silence_depth_;
+          }
+          continue;
+
+        // const -> type const-data | p | backref
+        //
+        // const is a C++ keyword, so we use the label `constant` instead.
+        constant:
+          if (Eat('B')) goto const_backref;
+          if (Eat('p')) {
+            if (!Emit("_")) return false;
+            continue;
+          }
+
+          // Scan the type without printing it.
+          //
+          // The Rust language restricts the type of a const generic argument
+          // much more than the mangling grammar does.  We do not enforce this.
+          //
+          // We also do not bother printing false, true, 'A', and '\u{abcd}' for
+          // the types bool and char.  Because we do not print generic-args
+          // contents, we expect to print constants only in array sizes, and
+          // those should not be bool or char.
+          ++silence_depth_;
+          ABSL_DEMANGLER_RECURSE(type, kConstData);
+          --silence_depth_;
+
+          // const-data -> n? hex-digit* _
+          //
+          // Although the grammar doesn't say this, existing demanglers expect
+          // that zero is 0, not an empty digit sequence, and no nonzero value
+          // may have leading zero digits.  Also n0_ is accepted and printed as
+          // -0, though a toolchain will probably never write that encoding.
+          if (Eat('n') && !EmitChar('-')) return false;
+          if (!Emit("0x")) return false;
+          if (Eat('0')) {
+            if (!EmitChar('0')) return false;
+            if (!Eat('_')) return false;
+            continue;
+          }
+          while (IsLowerHexDigit(Peek())) {
+            if (!EmitChar(Take())) return false;
+          }
+          if (!Eat('_')) return false;
+          continue;
+
+        // generic-args -> I path generic-arg* E (I already consumed)
+        //
+        // We follow the C++ demangler in omitting all the arguments from the
+        // output, printing only the list opening and closing tokens.
+        generic_args:
+          ABSL_DEMANGLER_RECURSE(path, kBeginGenericArgList);
+          if (!Emit("::<>")) return false;
+          ++silence_depth_;
+          while (!Eat('E')) {
+            ABSL_DEMANGLER_RECURSE(generic_arg, kContinueGenericArgList);
+          }
+          --silence_depth_;
+          continue;
+
+        // generic-arg -> lifetime | type | K const
+        generic_arg:
+          if (Peek() == 'L') {
+            if (!ParseOptionalLifetime()) return false;
+            continue;
+          }
+          if (Eat('K')) goto constant;
+          goto type;
+
+        // backref -> B base-62-number (B already consumed)
+        //
+        // The BeginBackref call parses and range-checks the base-62-number.  We
+        // always do that much.
+        //
+        // The recursive call parses and prints what the backref points at.  We
+        // save CPU and stack by skipping this work if the output would be
+        // suppressed anyway.
+        path_backref:
+          if (!BeginBackref()) return false;
+          if (silence_depth_ == 0) {
+            ABSL_DEMANGLER_RECURSE(path, kPathBackrefEnding);
+          }
+          EndBackref();
+          continue;
+
+        // This represents the same backref production as in path_backref but
+        // parses the target as a type instead of a path.
+        type_backref:
+          if (!BeginBackref()) return false;
+          if (silence_depth_ == 0) {
+            ABSL_DEMANGLER_RECURSE(type, kTypeBackrefEnding);
+          }
+          EndBackref();
+          continue;
+
+        const_backref:
+          if (!BeginBackref()) return false;
+          if (silence_depth_ == 0) {
+            ABSL_DEMANGLER_RECURSE(constant, kConstantBackrefEnding);
+          }
+          EndBackref();
+          continue;
+      }
+    }
+
+    return false;  // hit iteration limit or a bug in our stack handling
+  }
+
+ private:
+  // Enumerates resumption points for ABSL_DEMANGLER_RECURSE calls.
+  enum ReturnAddress : uint8_t {
+    kInstantiatingCrate,
+    kVendorSpecificSuffix,
+    kIdentifierInUppercaseNamespace,
+    kIdentifierInLowercaseNamespace,
+    kInherentImplType,
+    kInherentImplEnding,
+    kTraitImplType,
+    kTraitImplInfix,
+    kTraitImplEnding,
+    kImplPathEnding,
+    kTraitDefinitionInfix,
+    kTraitDefinitionEnding,
+    kArraySize,
+    kFinishArray,
+    kSliceEnding,
+    kAfterFirstTupleElement,
+    kAfterSecondTupleElement,
+    kAfterThirdTupleElement,
+    kAfterSubsequentTupleElement,
+    kContinueParameterList,
+    kFinishFn,
+    kBeginAutoTraits,
+    kContinueAutoTraits,
+    kContinueDynTrait,
+    kContinueAssocBinding,
+    kConstData,
+    kBeginGenericArgList,
+    kContinueGenericArgList,
+    kPathBackrefEnding,
+    kTypeBackrefEnding,
+    kConstantBackrefEnding,
+  };
+
+  // Element counts for the stack arrays.  Larger stack sizes accommodate more
+  // deeply nested names at the cost of a larger footprint on the C++ call
+  // stack.
+  enum {
+    // Maximum recursive calls outstanding at one time.
+    kStackSize = 256,
+
+    // Maximum N<uppercase> nested-paths open at once.  We do not expect
+    // closures inside closures inside closures as much as functions inside
+    // modules inside other modules, so we can use a smaller array here.
+    kNamespaceStackSize = 64,
+
+    // Maximum number of nested backrefs.  We can keep this stack pretty small
+    // because we do not follow backrefs inside generic-args or other contexts
+    // that suppress printing, so deep stacking is unlikely in practice.
+    kPositionStackSize = 16,
+  };
+
+  // Returns the next input character without consuming it.
+  char Peek() const { return encoding_[pos_]; }
+
+  // Consumes and returns the next input character.
+  char Take() { return encoding_[pos_++]; }
+
+  // If the next input character is the given character, consumes it and returns
+  // true; otherwise returns false without consuming a character.
+  ABSL_MUST_USE_RESULT bool Eat(char want) {
+    if (encoding_[pos_] != want) return false;
+    ++pos_;
+    return true;
+  }
+
+  // Provided there is enough remaining output space, appends c to the output,
+  // writing a fresh NUL terminator afterward, and returns true.  Returns false
+  // if the output buffer had less than two bytes free.
+  ABSL_MUST_USE_RESULT bool EmitChar(char c) {
+    if (silence_depth_ > 0) return true;
+    if (out_end_ - out_ < 2) return false;
+    *out_++ = c;
+    *out_ = '\0';
+    return true;
+  }
+
+  // Provided there is enough remaining output space, appends the C string token
+  // to the output, followed by a NUL character, and returns true.  Returns
+  // false if not everything fit into the output buffer.
+  ABSL_MUST_USE_RESULT bool Emit(const char* token) {
+    if (silence_depth_ > 0) return true;
+    const size_t token_length = std::strlen(token);
+    const size_t bytes_to_copy = token_length + 1;  // token and final NUL
+    if (static_cast<size_t>(out_end_ - out_) < bytes_to_copy) return false;
+    std::memcpy(out_, token, bytes_to_copy);
+    out_ += token_length;
+    return true;
+  }
+
+  // Provided there is enough remaining output space, appends the decimal form
+  // of disambiguator (if it's nonnegative) or "?" (if it's negative) to the
+  // output, followed by a NUL character, and returns true.  Returns false if
+  // not everything fit into the output buffer.
+  ABSL_MUST_USE_RESULT bool EmitDisambiguator(int disambiguator) {
+    if (disambiguator < 0) return EmitChar('?');  // parsed but too large
+    if (disambiguator == 0) return EmitChar('0');
+    // Convert disambiguator to decimal text.  Three digits per byte is enough
+    // because 999 > 256.  The bound will remain correct even if future
+    // maintenance changes the type of the disambiguator variable.
+    char digits[3 * sizeof(disambiguator)] = {};
+    size_t leading_digit_index = sizeof(digits) - 1;
+    for (; disambiguator > 0; disambiguator /= 10) {
+      digits[--leading_digit_index] =
+          static_cast<char>('0' + disambiguator % 10);
+    }
+    return Emit(digits + leading_digit_index);
+  }
+
+  // Consumes an optional disambiguator (s123_) from the input.
+  //
+  // On success returns true and fills value with the encoded value if it was
+  // not too big, otherwise with -1.  If the optional disambiguator was omitted,
+  // value is 0.  On parse failure returns false and sets value to -1.
+  ABSL_MUST_USE_RESULT bool ParseDisambiguator(int& value) {
+    value = -1;
+
+    // disambiguator = s base-62-number
+    //
+    // Disambiguators are optional.  An omitted disambiguator is zero.
+    if (!Eat('s')) {
+      value = 0;
+      return true;
+    }
+    int base_62_value = 0;
+    if (!ParseBase62Number(base_62_value)) return false;
+    value = base_62_value < 0 ? -1 : base_62_value + 1;
+    return true;
+  }
+
+  // Consumes a base-62 number like _ or 123_ from the input.
+  //
+  // On success returns true and fills value with the encoded value if it was
+  // not too big, otherwise with -1.  On parse failure returns false and sets
+  // value to -1.
+  ABSL_MUST_USE_RESULT bool ParseBase62Number(int& value) {
+    value = -1;
+
+    // base-62-number = (digit | lower | upper)* _
+    //
+    // An empty base-62 digit sequence means 0.
+    if (Eat('_')) {
+      value = 0;
+      return true;
+    }
+
+    // A nonempty digit sequence denotes its base-62 value plus 1.
+    int encoded_number = 0;
+    bool overflowed = false;
+    while (IsAlpha(Peek()) || IsDigit(Peek())) {
+      const char c = Take();
+      if (encoded_number >= std::numeric_limits<int>::max()/62) {
+        // If we are close to overflowing an int, keep parsing but stop updating
+        // encoded_number and remember to return -1 at the end.  The point is to
+        // avoid undefined behavior while parsing crate-root disambiguators,
+        // which are large in practice but not shown in demangling, while
+        // successfully computing closure and shim disambiguators, which are
+        // typically small and are printed out.
+        overflowed = true;
+      } else {
+        int digit;
+        if (IsDigit(c)) {
+          digit = c - '0';
+        } else if (IsLower(c)) {
+          digit = c - 'a' + 10;
+        } else {
+          digit = c - 'A' + 36;
+        }
+        encoded_number = 62 * encoded_number + digit;
+      }
+    }
+
+    if (!Eat('_')) return false;
+    if (!overflowed) value = encoded_number + 1;
+    return true;
+  }
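
To make the off-by-one convention concrete, a minimal standalone restatement of the same rule (illustrative only; Base62Value is a hypothetical helper without the overflow guard above):

    #include <string>

    // An empty digit string means 0; a nonempty string means its base-62
    // value plus one, with digit values '0'-'9' -> 0..9, 'a'-'z' -> 10..35,
    // 'A'-'Z' -> 36..61.
    int Base62Value(const std::string& digits) {  // digits without trailing _
      if (digits.empty()) return 0;               // "_"  -> 0
      int value = 0;
      for (char c : digits) {
        int d = ('0' <= c && c <= '9') ? c - '0'
              : ('a' <= c && c <= 'z') ? c - 'a' + 10
                                       : c - 'A' + 36;
        value = 62 * value + d;
      }
      return value + 1;  // "0_" -> 1, "Z_" -> 62, "10_" -> 63
    }

Under this rule an omitted disambiguator is 0, s_ is 1, and s0_ is 2, since ParseDisambiguator adds one more on top of the base-62 value.
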
+
+  // Consumes an identifier from the input, returning true on success.
+  //
+  // A nonzero uppercase_namespace specifies the character after the N in a
+  // nested-identifier, e.g., 'C' for a closure, allowing ParseIdentifier to
+  // write out the name with the conventional decoration for that namespace.
+  ABSL_MUST_USE_RESULT bool ParseIdentifier(char uppercase_namespace = '\0') {
+    // identifier -> disambiguator? undisambiguated-identifier
+    int disambiguator = 0;
+    if (!ParseDisambiguator(disambiguator)) return false;
+
+    return ParseUndisambiguatedIdentifier(uppercase_namespace, disambiguator);
+  }
+
+  // Consumes from the input an identifier with no preceding disambiguator,
+  // returning true on success.
+  //
+  // When ParseIdentifier calls this, it passes the N<namespace> character and
+  // disambiguator value so that "{closure#42}" and similar forms can be
+  // rendered correctly.
+  //
+  // At other appearances of undisambiguated-identifier in the grammar, this
+  // treatment is not applicable, and the call site omits both arguments.
+  ABSL_MUST_USE_RESULT bool ParseUndisambiguatedIdentifier(
+      char uppercase_namespace = '\0', int disambiguator = 0) {
+    // undisambiguated-identifier -> u? decimal-number _? bytes
+    const bool is_punycoded = Eat('u');
+    if (!IsDigit(Peek())) return false;
+    int num_bytes = 0;
+    if (!ParseDecimalNumber(num_bytes)) return false;
+    (void)Eat('_');  // optional separator, needed if a digit follows
+    if (is_punycoded) {
+      DecodeRustPunycodeOptions options;
+      options.punycode_begin = &encoding_[pos_];
+      options.punycode_end = &encoding_[pos_] + num_bytes;
+      options.out_begin = out_;
+      options.out_end = out_end_;
+      out_ = DecodeRustPunycode(options);
+      if (out_ == nullptr) return false;
+      pos_ += static_cast<size_t>(num_bytes);
+    }
+
+    // Emit the beginnings of braced forms like {shim:vtable#0}.
+    if (uppercase_namespace != '\0') {
+      switch (uppercase_namespace) {
+        case 'C':
+          if (!Emit("{closure")) return false;
+          break;
+        case 'S':
+          if (!Emit("{shim")) return false;
+          break;
+        default:
+          if (!EmitChar('{') || !EmitChar(uppercase_namespace)) return false;
+          break;
+      }
+      if (num_bytes > 0 && !Emit(":")) return false;
+    }
+
+    // Emit the name itself.
+    if (!is_punycoded) {
+      for (int i = 0; i < num_bytes; ++i) {
+        const char c = Take();
+        if (!IsIdentifierChar(c) &&
+            // The spec gives toolchains the choice of Punycode or raw UTF-8 for
+            // identifiers containing code points above 0x7f, so accept bytes
+            // with the high bit set.
+            (c & 0x80) == 0) {
+          return false;
+        }
+        if (!EmitChar(c)) return false;
+      }
+    }
+
+    // Emit the endings of braced forms, e.g., "#42}".
+    if (uppercase_namespace != '\0') {
+      if (!EmitChar('#')) return false;
+      if (!EmitDisambiguator(disambiguator)) return false;
+      if (!EmitChar('}')) return false;
+    }
+
+    return true;
+  }
+
+  // Consumes a decimal number like 0 or 123 from the input.  On success returns
+  // true and fills value with the encoded value.  If the encoded value is too
+  // large or otherwise unparsable, returns false and sets value to -1.
+  ABSL_MUST_USE_RESULT bool ParseDecimalNumber(int& value) {
+    value = -1;
+    if (!IsDigit(Peek())) return false;
+    int encoded_number = Take() - '0';
+    if (encoded_number == 0) {
+      // Decimal numbers are never encoded with extra leading zeroes.
+      value = 0;
+      return true;
+    }
+    while (IsDigit(Peek()) &&
+           // avoid overflow
+           encoded_number < std::numeric_limits<int>::max()/10) {
+      encoded_number = 10 * encoded_number + (Take() - '0');
+    }
+    if (IsDigit(Peek())) return false;  // too big
+    value = encoded_number;
+    return true;
+  }
+
+  // Consumes a binder of higher-ranked lifetimes if one is present.  On success
+  // returns true and discards the encoded lifetime count.  On parse failure
+  // returns false.
+  ABSL_MUST_USE_RESULT bool ParseOptionalBinder() {
+    // binder -> G base-62-number
+    if (!Eat('G')) return true;
+    int ignored_binding_count;
+    return ParseBase62Number(ignored_binding_count);
+  }
+
+  // Consumes a lifetime if one is present.
+  //
+  // On success returns true and discards the lifetime index.  We do not print
+  // or even range-check lifetimes because they are a finer detail than other
+  // things we omit from output, such as the entire contents of generic-args.
+  //
+  // On parse failure returns false.
+  ABSL_MUST_USE_RESULT bool ParseOptionalLifetime() {
+    // lifetime -> L base-62-number
+    if (!Eat('L')) return true;
+    int ignored_de_bruijn_index;
+    return ParseBase62Number(ignored_de_bruijn_index);
+  }
+
+  // Consumes a lifetime just like ParseOptionalLifetime, but returns false if
+  // there is no lifetime here.
+  ABSL_MUST_USE_RESULT bool ParseRequiredLifetime() {
+    if (Peek() != 'L') return false;
+    return ParseOptionalLifetime();
+  }
+
+  // Pushes ns onto the namespace stack and returns true if the stack is not
+  // full, else returns false.
+  ABSL_MUST_USE_RESULT bool PushNamespace(char ns) {
+    if (namespace_depth_ == kNamespaceStackSize) return false;
+    namespace_stack_[namespace_depth_++] = ns;
+    return true;
+  }
+
+  // Pops the last pushed namespace.  Requires that the namespace stack is not
+  // empty (namespace_depth_ > 0).
+  char PopNamespace() { return namespace_stack_[--namespace_depth_]; }
+
+  // Pushes position onto the position stack and returns true if the stack is
+  // not full, else returns false.
+  ABSL_MUST_USE_RESULT bool PushPosition(int position) {
+    if (position_depth_ == kPositionStackSize) return false;
+    position_stack_[position_depth_++] = position;
+    return true;
+  }
+
+  // Pops the last pushed input position.  Requires that the position stack is
+  // not empty (position_depth_ > 0).
+  int PopPosition() { return position_stack_[--position_depth_]; }
+
+  // Consumes a base-62-number denoting a backref target, pushes the current
+  // input position on the data stack, and sets the input position to the
+  // beginning of the backref target.  Returns true on success.  Returns false
+  // if parsing failed, the stack is exhausted, or the backref target position
+  // is out of range.
+  ABSL_MUST_USE_RESULT bool BeginBackref() {
+    // backref = B base-62-number (B already consumed)
+    //
+    // Reject backrefs that don't parse, overflow int, or don't point backward.
+    // If the offset looks fine, adjust it to account for the _R prefix.
+    int offset = 0;
+    const int offset_of_this_backref =
+        pos_ - 2 /* _R */ - 1 /* B already consumed */;
+    if (!ParseBase62Number(offset) || offset < 0 ||
+        offset >= offset_of_this_backref) {
+      return false;
+    }
+    offset += 2;
+
+    // Save the old position to restore later.
+    if (!PushPosition(pos_)) return false;
+
+    // Move the input position to the backref target.
+    //
+    // Note that we do not check whether the new position points to the
+    // beginning of a construct matching the context in which the backref
+    // appeared.  We just jump to it and see whether nested parsing succeeds.
+    // We therefore accept various wrong manglings, e.g., a type backref
+    // pointing to an 'l' character inside an identifier, which happens to mean
+    // i32 when parsed as a type mangling.  This saves the complexity and RAM
+    // footprint of remembering which offsets began which kinds of
+    // substructures.  Existing demanglers take similar shortcuts.
+    pos_ = offset;
+    return true;
+  }
+
+  // Cleans up after a backref production by restoring the previous input
+  // position from the data stack.
+  void EndBackref() { pos_ = PopPosition(); }
+
+  // The leftmost recursion_depth_ elements of recursion_stack_ contain the
+  // ReturnAddresses pushed by ABSL_DEMANGLER_RECURSE calls not yet completed.
+  ReturnAddress recursion_stack_[kStackSize] = {};
+  int recursion_depth_ = 0;
+
+  // The leftmost namespace_depth_ elements of namespace_stack_ contain the
+  // uppercase namespace identifiers for open nested-paths, e.g., 'C' for a
+  // closure.
+  char namespace_stack_[kNamespaceStackSize] = {};
+  int namespace_depth_ = 0;
+
+  // The leftmost position_depth_ elements of position_stack_ contain the input
+  // positions to return to after fully printing the targets of backrefs.
+  int position_stack_[kPositionStackSize] = {};
+  int position_depth_ = 0;
+
+  // Anything parsed while silence_depth_ > 0 contributes nothing to the
+  // demangled output.  For constructs omitted from the demangling, such as
+  // impl-path and the contents of generic-args, we will increment
+  // silence_depth_ on the way in and decrement silence_depth_ on the way out.
+  int silence_depth_ = 0;
+
+  // Input: encoding_ points to a Rust mangled symbol, and encoding_[pos_] is
+  // the next input character to be scanned.
+  int pos_ = 0;
+  const char* encoding_ = nullptr;
+
+  // Output: *out_ is where the next output character should be written, and
+  // out_end_ points past the last byte of available space.
+  char* out_ = nullptr;
+  char* out_end_ = nullptr;
+};
+
+}  // namespace
+
+bool DemangleRustSymbolEncoding(const char* mangled, char* out,
+                                size_t out_size) {
+  return RustSymbolParser(mangled, out, out + out_size).Parse();
+}
+
+}  // namespace debugging_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/absl/debugging/internal/demangle_rust.h b/absl/debugging/internal/demangle_rust.h
new file mode 100644
index 0000000..94a9aec
--- /dev/null
+++ b/absl/debugging/internal/demangle_rust.h
@@ -0,0 +1,42 @@
+// Copyright 2024 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_DEBUGGING_INTERNAL_DEMANGLE_RUST_H_
+#define ABSL_DEBUGGING_INTERNAL_DEMANGLE_RUST_H_
+
+#include <cstddef>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+
+// Demangle the Rust encoding `mangled`.  On success, return true and write the
+// demangled symbol name to `out`.  Otherwise, return false, leaving unspecified
+// contents in `out`.  For example, calling DemangleRustSymbolEncoding with
+// `mangled = "_RNvC8my_crate7my_func"` will yield `my_crate::my_func` in `out`,
+// provided `out_size` is large enough for that value and its trailing NUL.
+//
+// DemangleRustSymbolEncoding is async-signal-safe and runs in bounded C++
+// call-stack space.  It is suitable for symbolizing stack traces in a signal
+// handler.
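+//
+// A minimal usage sketch (the buffer size here is arbitrary):
+//
+//   char buf[64];
+//   if (absl::debugging_internal::DemangleRustSymbolEncoding(
+//           "_RNvC8my_crate7my_func", buf, sizeof(buf))) {
+//     // buf now holds "my_crate::my_func".
+//   }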
+bool DemangleRustSymbolEncoding(const char* mangled, char* out,
+                                size_t out_size);
+
+}  // namespace debugging_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_DEBUGGING_INTERNAL_DEMANGLE_RUST_H_
diff --git a/absl/debugging/internal/demangle_rust_test.cc b/absl/debugging/internal/demangle_rust_test.cc
new file mode 100644
index 0000000..8ceb1fd
--- /dev/null
+++ b/absl/debugging/internal/demangle_rust_test.cc
@@ -0,0 +1,584 @@
+// Copyright 2024 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/debugging/internal/demangle_rust.h"
+
+#include <cstddef>
+#include <string>
+
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+namespace {
+
+// If DemangleRustSymbolEncoding(mangled, <buffer with room for buffer_size
+// chars>, buffer_size) returns true and seems not to have overrun its output
+// buffer, returns the string written by DemangleRustSymbolEncoding; otherwise
+// returns an error message.
+std::string ResultOfDemangling(const char* mangled, size_t buffer_size) {
+  // Fill the buffer with something other than NUL so we test whether Demangle
+  // appends trailing NUL as expected.
+  std::string buffer(buffer_size + 1, '~');
+  constexpr char kCanaryCharacter = 0x7f;  // arbitrary unlikely value
+  buffer[buffer_size] = kCanaryCharacter;
+  if (!DemangleRustSymbolEncoding(mangled, &buffer[0], buffer_size)) {
+    return "Failed parse";
+  }
+  if (buffer[buffer_size] != kCanaryCharacter) {
+    return "Buffer overrun by output: " + buffer.substr(0, buffer_size + 1)
+        + "...";
+  }
+  return buffer.data();  // Not buffer itself: this trims trailing padding.
+}
+
+// Tests that DemangleRustSymbolEncoding converts mangled into plaintext given
+// enough output buffer space but returns false and avoids overrunning a buffer
+// that is one byte too short.
+//
+// The lambda wrapping allows ASSERT_EQ to bail out the first time an
+// expectation is not satisfied, preventing redundant errors for the same bug.
+//
+// We test first with excess space so that if the algorithm just computes the
+// wrong answer, it will be clear from the error log that the bounds checks are
+// unlikely to be the code at fault.
+#define EXPECT_DEMANGLING(mangled, plaintext) \
+  do { \
+    [] { \
+      constexpr size_t plenty_of_space = sizeof(plaintext) + 128; \
+      constexpr size_t just_enough_space = sizeof(plaintext); \
+      constexpr size_t one_byte_too_few = sizeof(plaintext) - 1; \
+      const char* expected_plaintext = plaintext; \
+      const char* expected_error = "Failed parse"; \
+      ASSERT_EQ(ResultOfDemangling(mangled, plenty_of_space), \
+                expected_plaintext); \
+      ASSERT_EQ(ResultOfDemangling(mangled, just_enough_space), \
+                expected_plaintext); \
+      ASSERT_EQ(ResultOfDemangling(mangled, one_byte_too_few), \
+                expected_error); \
+    }(); \
+  } while (0)
+
+// Tests that DemangleRustSymbolEncoding rejects the given input (typically, a
+// truncation of a real Rust symbol name).
+#define EXPECT_DEMANGLING_FAILS(mangled) \
+    do { \
+      constexpr size_t plenty_of_space = 1024; \
+      const char* expected_error = "Failed parse"; \
+      EXPECT_EQ(ResultOfDemangling(mangled, plenty_of_space), expected_error); \
+    } while (0)
+
+// Piping grep -C 1 _R demangle_rust_test.cc into your favorite c++filt
+// implementation allows you to verify that the goldens below are reasonable.
+
+TEST(DemangleRust, EmptyDemangling) {
+  EXPECT_TRUE(DemangleRustSymbolEncoding("_RC0", nullptr, 0));
+}
+
+TEST(DemangleRust, FunctionAtCrateLevel) {
+  EXPECT_DEMANGLING("_RNvC10crate_name9func_name", "crate_name::func_name");
+  EXPECT_DEMANGLING(
+      "_RNvCs09azAZ_10crate_name9func_name", "crate_name::func_name");
+}
+
+TEST(DemangleRust, TruncationsOfFunctionAtCrateLevel) {
+  EXPECT_DEMANGLING_FAILS("_R");
+  EXPECT_DEMANGLING_FAILS("_RN");
+  EXPECT_DEMANGLING_FAILS("_RNvC");
+  EXPECT_DEMANGLING_FAILS("_RNvC10");
+  EXPECT_DEMANGLING_FAILS("_RNvC10crate_nam");
+  EXPECT_DEMANGLING_FAILS("_RNvC10crate_name");
+  EXPECT_DEMANGLING_FAILS("_RNvC10crate_name9");
+  EXPECT_DEMANGLING_FAILS("_RNvC10crate_name9func_nam");
+  EXPECT_DEMANGLING_FAILS("_RNvCs");
+  EXPECT_DEMANGLING_FAILS("_RNvCs09azAZ");
+  EXPECT_DEMANGLING_FAILS("_RNvCs09azAZ_");
+}
+
+TEST(DemangleRust, VendorSuffixes) {
+  EXPECT_DEMANGLING("_RNvC10crate_name9func_name.!@#", "crate_name::func_name");
+  EXPECT_DEMANGLING("_RNvC10crate_name9func_name$!@#", "crate_name::func_name");
+}
+
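+// The second mangling below spells the same identifier with the mangling
+// scheme's Punycode-style encoding, signalled by the "u" prefix on the
+// identifier; both forms demangle to the same text.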
+TEST(DemangleRust, UnicodeIdentifiers) {
+  EXPECT_DEMANGLING("_RNvC7ice_cap17Eyjafjallajökull",
+                    "ice_cap::Eyjafjallajökull");
+  EXPECT_DEMANGLING("_RNvC7ice_caps_u19Eyjafjallajkull_jtb",
+                    "ice_cap::Eyjafjallajökull");
+}
+
+TEST(DemangleRust, FunctionInModule) {
+  EXPECT_DEMANGLING("_RNvNtCs09azAZ_10crate_name11module_name9func_name",
+                    "crate_name::module_name::func_name");
+}
+
+TEST(DemangleRust, FunctionInFunction) {
+  EXPECT_DEMANGLING(
+      "_RNvNvCs09azAZ_10crate_name15outer_func_name15inner_func_name",
+      "crate_name::outer_func_name::inner_func_name");
+}
+
+TEST(DemangleRust, ClosureInFunction) {
+  EXPECT_DEMANGLING(
+      "_RNCNvCs09azAZ_10crate_name9func_name0",
+      "crate_name::func_name::{closure#0}");
+  EXPECT_DEMANGLING(
+      "_RNCNvCs09azAZ_10crate_name9func_name0Cs123_12client_crate",
+      "crate_name::func_name::{closure#0}");
+}
+
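+// A closure's number comes from the optional "s<base-62-digits>_"
+// disambiguator preceding its (usually empty) name: no disambiguator means
+// closure #0, "s_" means #1, and "s<x>_" means x + 2, where the digits
+// 0-9, a-z, A-Z stand for 0..61 (so "s9_" is #11, "sZ_" is #63, and "s10_"
+// is #64).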
+TEST(DemangleRust, ClosureNumbering) {
+  EXPECT_DEMANGLING(
+      "_RNCNvCs09azAZ_10crate_name9func_names_0Cs123_12client_crate",
+      "crate_name::func_name::{closure#1}");
+  EXPECT_DEMANGLING(
+      "_RNCNvCs09azAZ_10crate_name9func_names0_0Cs123_12client_crate",
+      "crate_name::func_name::{closure#2}");
+  EXPECT_DEMANGLING(
+      "_RNCNvCs09azAZ_10crate_name9func_names9_0Cs123_12client_crate",
+      "crate_name::func_name::{closure#11}");
+  EXPECT_DEMANGLING(
+      "_RNCNvCs09azAZ_10crate_name9func_namesa_0Cs123_12client_crate",
+      "crate_name::func_name::{closure#12}");
+  EXPECT_DEMANGLING(
+      "_RNCNvCs09azAZ_10crate_name9func_namesz_0Cs123_12client_crate",
+      "crate_name::func_name::{closure#37}");
+  EXPECT_DEMANGLING(
+      "_RNCNvCs09azAZ_10crate_name9func_namesA_0Cs123_12client_crate",
+      "crate_name::func_name::{closure#38}");
+  EXPECT_DEMANGLING(
+      "_RNCNvCs09azAZ_10crate_name9func_namesZ_0Cs123_12client_crate",
+      "crate_name::func_name::{closure#63}");
+  EXPECT_DEMANGLING(
+      "_RNCNvCs09azAZ_10crate_name9func_names10_0Cs123_12client_crate",
+      "crate_name::func_name::{closure#64}");
+  EXPECT_DEMANGLING(
+      "_RNCNvCs09azAZ_10crate_name9func_namesg6_0Cs123_12client_crate",
+      "crate_name::func_name::{closure#1000}");
+}
+
+TEST(DemangleRust, ClosureNumberOverflowingInt) {
+  EXPECT_DEMANGLING(
+      "_RNCNvCs09azAZ_10crate_name9func_names1234567_0Cs123_12client_crate",
+      "crate_name::func_name::{closure#?}");
+}
+
+TEST(DemangleRust, UnexpectedlyNamedClosure) {
+  EXPECT_DEMANGLING(
+      "_RNCNvCs123_10crate_name9func_name12closure_nameCs456_12client_crate",
+      "crate_name::func_name::{closure:closure_name#0}");
+  EXPECT_DEMANGLING(
+      "_RNCNvCs123_10crate_name9func_names2_12closure_nameCs456_12client_crate",
+      "crate_name::func_name::{closure:closure_name#4}");
+}
+
+TEST(DemangleRust, ItemNestedInsideClosure) {
+  EXPECT_DEMANGLING(
+      "_RNvNCNvCs123_10crate_name9func_name015inner_func_nameCs_12client_crate",
+      "crate_name::func_name::{closure#0}::inner_func_name");
+}
+
+TEST(DemangleRust, Shim) {
+  EXPECT_DEMANGLING(
+      "_RNSNvCs123_10crate_name9func_name6vtableCs456_12client_crate",
+      "crate_name::func_name::{shim:vtable#0}");
+}
+
+TEST(DemangleRust, UnknownUppercaseNamespace) {
+  EXPECT_DEMANGLING(
+      "_RNXNvCs123_10crate_name9func_name14mystery_objectCs456_12client_crate",
+      "crate_name::func_name::{X:mystery_object#0}");
+}
+
+TEST(DemangleRust, NestedUppercaseNamespaces) {
+  EXPECT_DEMANGLING(
+      "_RNCNXNYCs123_10crate_names0_1ys1_1xs2_0Cs456_12client_crate",
+      "crate_name::{Y:y#2}::{X:x#3}::{closure#4}");
+}
+
+TEST(DemangleRust, TraitDefinition) {
+  EXPECT_DEMANGLING(
+      "_RNvYNtC7crate_a9my_structNtC7crate_b8my_trait1f",
+      "<crate_a::my_struct as crate_b::my_trait>::f");
+}
+
+TEST(DemangleRust, BasicTypeNames) {
+  EXPECT_DEMANGLING("_RNvYaNtC1c1t1f", "<i8 as c::t>::f");
+  EXPECT_DEMANGLING("_RNvYbNtC1c1t1f", "<bool as c::t>::f");
+  EXPECT_DEMANGLING("_RNvYcNtC1c1t1f", "<char as c::t>::f");
+  EXPECT_DEMANGLING("_RNvYdNtC1c1t1f", "<f64 as c::t>::f");
+  EXPECT_DEMANGLING("_RNvYeNtC1c1t1f", "<str as c::t>::f");
+  EXPECT_DEMANGLING("_RNvYfNtC1c1t1f", "<f32 as c::t>::f");
+  EXPECT_DEMANGLING("_RNvYhNtC1c1t1f", "<u8 as c::t>::f");
+  EXPECT_DEMANGLING("_RNvYiNtC1c1t1f", "<isize as c::t>::f");
+  EXPECT_DEMANGLING("_RNvYjNtC1c1t1f", "<usize as c::t>::f");
+  EXPECT_DEMANGLING("_RNvYlNtC1c1t1f", "<i32 as c::t>::f");
+  EXPECT_DEMANGLING("_RNvYmNtC1c1t1f", "<u32 as c::t>::f");
+  EXPECT_DEMANGLING("_RNvYnNtC1c1t1f", "<i128 as c::t>::f");
+  EXPECT_DEMANGLING("_RNvYoNtC1c1t1f", "<u128 as c::t>::f");
+  EXPECT_DEMANGLING("_RNvYpNtC1c1t1f", "<_ as c::t>::f");
+  EXPECT_DEMANGLING("_RNvYsNtC1c1t1f", "<i16 as c::t>::f");
+  EXPECT_DEMANGLING("_RNvYtNtC1c1t1f", "<u16 as c::t>::f");
+  EXPECT_DEMANGLING("_RNvYuNtC1c1t1f", "<() as c::t>::f");
+  EXPECT_DEMANGLING("_RNvYvNtC1c1t1f", "<... as c::t>::f");
+  EXPECT_DEMANGLING("_RNvYxNtC1c1t1f", "<i64 as c::t>::f");
+  EXPECT_DEMANGLING("_RNvYyNtC1c1t1f", "<u64 as c::t>::f");
+  EXPECT_DEMANGLING("_RNvYzNtC1c1t1f", "<! as c::t>::f");
+
+  EXPECT_DEMANGLING_FAILS("_RNvYkNtC1c1t1f");
+}
+
+TEST(DemangleRust, SliceTypes) {
+  EXPECT_DEMANGLING("_RNvYSlNtC1c1t1f", "<[i32] as c::t>::f");
+  EXPECT_DEMANGLING("_RNvYSNtC1d1sNtC1c1t1f", "<[d::s] as c::t>::f");
+}
+
+TEST(DemangleRust, ImmutableReferenceTypes) {
+  EXPECT_DEMANGLING("_RNvYRlNtC1c1t1f", "<&i32 as c::t>::f");
+  EXPECT_DEMANGLING("_RNvYRNtC1d1sNtC1c1t1f", "<&d::s as c::t>::f");
+}
+
+TEST(DemangleRust, MutableReferenceTypes) {
+  EXPECT_DEMANGLING("_RNvYQlNtC1c1t1f", "<&mut i32 as c::t>::f");
+  EXPECT_DEMANGLING("_RNvYQNtC1d1sNtC1c1t1f", "<&mut d::s as c::t>::f");
+}
+
+TEST(DemangleRust, ConstantRawPointerTypes) {
+  EXPECT_DEMANGLING("_RNvYPlNtC1c1t1f", "<*const i32 as c::t>::f");
+  EXPECT_DEMANGLING("_RNvYPNtC1d1sNtC1c1t1f", "<*const d::s as c::t>::f");
+}
+
+TEST(DemangleRust, MutableRawPointerTypes) {
+  EXPECT_DEMANGLING("_RNvYOlNtC1c1t1f", "<*mut i32 as c::t>::f");
+  EXPECT_DEMANGLING("_RNvYONtC1d1sNtC1c1t1f", "<*mut d::s as c::t>::f");
+}
+
+TEST(DemangleRust, TupleLength0) {
+  EXPECT_DEMANGLING("_RNvYTENtC1c1t1f", "<() as c::t>::f");
+}
+
+TEST(DemangleRust, TupleLength1) {
+  EXPECT_DEMANGLING("_RNvYTlENtC1c1t1f", "<(i32,) as c::t>::f");
+  EXPECT_DEMANGLING("_RNvYTNtC1d1sENtC1c1t1f", "<(d::s,) as c::t>::f");
+}
+
+TEST(DemangleRust, TupleLength2) {
+  EXPECT_DEMANGLING("_RNvYTlmENtC1c1t1f", "<(i32, u32) as c::t>::f");
+  EXPECT_DEMANGLING("_RNvYTNtC1d1xNtC1e1yENtC1c1t1f",
+                    "<(d::x, e::y) as c::t>::f");
+}
+
+TEST(DemangleRust, TupleLength3) {
+  EXPECT_DEMANGLING("_RNvYTlmnENtC1c1t1f", "<(i32, u32, i128) as c::t>::f");
+  EXPECT_DEMANGLING("_RNvYTNtC1d1xNtC1e1yNtC1f1zENtC1c1t1f",
+                    "<(d::x, e::y, f::z) as c::t>::f");
+}
+
+TEST(DemangleRust, LongerTuplesAbbreviated) {
+  EXPECT_DEMANGLING("_RNvYTlmnoENtC1c1t1f",
+                    "<(i32, u32, i128, ...) as c::t>::f");
+  EXPECT_DEMANGLING("_RNvYTlmnNtC1d1xNtC1e1yENtC1c1t1f",
+                    "<(i32, u32, i128, ...) as c::t>::f");
+}
+
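+// "B<base-62-number>_" is a backref: it re-reads an earlier part of the
+// input, identified by its byte offset counted from just past the "_R"
+// prefix.  Below, B4_ points back at "C8my_crate", so my_trait ends up under
+// the same crate as my_struct.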
+TEST(DemangleRust, PathBackrefToCrate) {
+  EXPECT_DEMANGLING("_RNvYNtC8my_crate9my_structNtB4_8my_trait1f",
+                    "<my_crate::my_struct as my_crate::my_trait>::f");
+}
+
+TEST(DemangleRust, PathBackrefToNestedPath) {
+  EXPECT_DEMANGLING("_RNvYNtNtC1c1m1sNtB4_1t1f", "<c::m::s as c::m::t>::f");
+}
+
+TEST(DemangleRust, PathBackrefAsInstantiatingCrate) {
+  EXPECT_DEMANGLING("_RNCNvC8my_crate7my_func0B3_",
+                    "my_crate::my_func::{closure#0}");
+}
+
+TEST(DemangleRust, TypeBackrefsNestedInTuple) {
+  EXPECT_DEMANGLING("_RNvYTTRlB4_ERB3_ENtC1c1t1f",
+                    "<((&i32, &i32), &(&i32, &i32)) as c::t>::f");
+}
+
+TEST(DemangleRust, NoInfiniteLoopOnBackrefToTheWhole) {
+  EXPECT_DEMANGLING_FAILS("_RB_");
+  EXPECT_DEMANGLING_FAILS("_RNvB_1sNtC1c1t1f");
+}
+
+TEST(DemangleRust, NoCrashOnForwardBackref) {
+  EXPECT_DEMANGLING_FAILS("_RB0_");
+  EXPECT_DEMANGLING_FAILS("_RB1_");
+  EXPECT_DEMANGLING_FAILS("_RB2_");
+  EXPECT_DEMANGLING_FAILS("_RB3_");
+  EXPECT_DEMANGLING_FAILS("_RB4_");
+}
+
+TEST(DemangleRust, PathBackrefsDoNotRecurseDuringSilence) {
+  // B_ points at the value f (the whole mangling), so the cycle would lead to
+  // parse failure if the parser tried to parse what was pointed to.
+  EXPECT_DEMANGLING("_RNvYTlmnNtB_1sENtC1c1t1f",
+                    "<(i32, u32, i128, ...) as c::t>::f");
+}
+
+TEST(DemangleRust, TypeBackrefsDoNotRecurseDuringSilence) {
+  // B2_ points at the tuple type, likewise making a cycle that the parser
+  // avoids following.
+  EXPECT_DEMANGLING("_RNvYTlmnB2_ENtC1c1t1f",
+                    "<(i32, u32, i128, ...) as c::t>::f");
+}
+
+TEST(DemangleRust, ConstBackrefsDoNotRecurseDuringSilence) {
+  // B_ points at the whole I...E mangling, which does not parse as a const.
+  EXPECT_DEMANGLING("_RINvC1c1fAlB_E", "c::f::<>");
+}
+
+TEST(DemangleRust, ReturnFromBackrefToInputPosition256) {
+  // Show that we can resume at input positions that don't fit into a byte.
+  EXPECT_DEMANGLING("_RNvYNtC1c238very_long_type_"
+                    "ABCDEFGHIJabcdefghijABCDEFGHIJabcdefghij"
+                    "ABCDEFGHIJabcdefghijABCDEFGHIJabcdefghij"
+                    "ABCDEFGHIJabcdefghijABCDEFGHIJabcdefghij"
+                    "ABCDEFGHIJabcdefghijABCDEFGHIJabcdefghij"
+                    "ABCDEFGHIJabcdefghijABCDEFGHIJabcdefghij"
+                    "ABCDEFGHIJabcdefghijABC"
+                    "NtB4_1t1f",
+                    "<c::very_long_type_"
+                    "ABCDEFGHIJabcdefghijABCDEFGHIJabcdefghij"
+                    "ABCDEFGHIJabcdefghijABCDEFGHIJabcdefghij"
+                    "ABCDEFGHIJabcdefghijABCDEFGHIJabcdefghij"
+                    "ABCDEFGHIJabcdefghijABCDEFGHIJabcdefghij"
+                    "ABCDEFGHIJabcdefghijABCDEFGHIJabcdefghij"
+                    "ABCDEFGHIJabcdefghijABC"
+                    " as c::t>::f");
+}
+
+TEST(DemangleRust, EmptyGenericArgs) {
+  EXPECT_DEMANGLING("_RINvC1c1fE", "c::f::<>");
+}
+
+TEST(DemangleRust, OneSimpleTypeInGenericArgs) {
+  EXPECT_DEMANGLING("_RINvC1c1flE",  // c::f::<i32>
+                    "c::f::<>");
+}
+
+TEST(DemangleRust, OneTupleInGenericArgs) {
+  EXPECT_DEMANGLING("_RINvC1c1fTlmEE",  // c::f::<(i32, u32)>
+                    "c::f::<>");
+}
+
+TEST(DemangleRust, OnePathInGenericArgs) {
+  EXPECT_DEMANGLING("_RINvC1c1fNtC1d1sE",  // c::f::<d::s>
+                    "c::f::<>");
+}
+
+TEST(DemangleRust, LongerGenericArgs) {
+  EXPECT_DEMANGLING("_RINvC1c1flmRNtC1d1sE",  // c::f::<i32, u32, &d::s>
+                    "c::f::<>");
+}
+
+TEST(DemangleRust, BackrefInGenericArgs) {
+  EXPECT_DEMANGLING("_RINvC1c1fRlB7_NtB2_1sE",  // c::f::<&i32, &i32, c::s>
+                    "c::f::<>");
+}
+
+TEST(DemangleRust, NestedGenericArgs) {
+  EXPECT_DEMANGLING("_RINvC1c1fINtB2_1slEmE",  // c::f::<c::s::<i32>, u32>
+                    "c::f::<>");
+}
+
+TEST(DemangleRust, MonomorphicEntityNestedInsideGeneric) {
+  EXPECT_DEMANGLING("_RNvINvC1c1fppE1g",  // c::f::<_, _>::g
+                    "c::f::<>::g");
+}
+
+TEST(DemangleRust, ArrayTypeWithSimpleElementType) {
+  EXPECT_DEMANGLING("_RNvYAlj1f_NtC1c1t1f", "<[i32; 0x1f] as c::t>::f");
+}
+
+TEST(DemangleRust, ArrayTypeWithComplexElementType) {
+  EXPECT_DEMANGLING("_RNvYAINtC1c1slEj1f_NtB6_1t1f",
+                    "<[c::s::<>; 0x1f] as c::t>::f");
+}
+
+TEST(DemangleRust, NestedArrayType) {
+  EXPECT_DEMANGLING("_RNvYAAlj1f_j2e_NtC1c1t1f",
+                    "<[[i32; 0x1f]; 0x2e] as c::t>::f");
+}
+
+TEST(DemangleRust, BackrefArraySize) {
+  EXPECT_DEMANGLING("_RNvYAAlj1f_B5_NtC1c1t1f",
+                    "<[[i32; 0x1f]; 0x1f] as c::t>::f");
+}
+
+TEST(DemangleRust, ZeroArraySize) {
+  EXPECT_DEMANGLING("_RNvYAlj0_NtC1c1t1f", "<[i32; 0x0] as c::t>::f");
+}
+
+TEST(DemangleRust, SurprisingMinusesInArraySize) {
+  // Compilers shouldn't do this stuff, but existing demanglers accept it.
+  EXPECT_DEMANGLING("_RNvYAljn0_NtC1c1t1f", "<[i32; -0x0] as c::t>::f");
+  EXPECT_DEMANGLING("_RNvYAljn42_NtC1c1t1f", "<[i32; -0x42] as c::t>::f");
+}
+
+TEST(DemangleRust, NumberAsGenericArg) {
+  EXPECT_DEMANGLING("_RINvC1c1fKl8_E",  // c::f::<0x8>
+                    "c::f::<>");
+}
+
+TEST(DemangleRust, NumberAsFirstOfTwoGenericArgs) {
+  EXPECT_DEMANGLING("_RINvC1c1fKl8_mE",  // c::f::<0x8, u32>
+                    "c::f::<>");
+}
+
+TEST(DemangleRust, NumberAsSecondOfTwoGenericArgs) {
+  EXPECT_DEMANGLING("_RINvC1c1fmKl8_E",  // c::f::<u32, 0x8>
+                    "c::f::<>");
+}
+
+TEST(DemangleRust, NumberPlaceholder) {
+  EXPECT_DEMANGLING("_RNvINvC1c1fKpE1g",  // c::f::<_>::g
+                    "c::f::<>::g");
+}
+
+TEST(DemangleRust, InherentImplWithoutDisambiguator) {
+  EXPECT_DEMANGLING("_RNvMNtC8my_crate6my_modNtB2_9my_struct7my_func",
+                    "<my_crate::my_mod::my_struct>::my_func");
+}
+
+TEST(DemangleRust, InherentImplWithDisambiguator) {
+  EXPECT_DEMANGLING("_RNvMs_NtC8my_crate6my_modNtB4_9my_struct7my_func",
+                    "<my_crate::my_mod::my_struct>::my_func");
+}
+
+TEST(DemangleRust, TraitImplWithoutDisambiguator) {
+  EXPECT_DEMANGLING("_RNvXC8my_crateNtB2_9my_structNtB2_8my_trait7my_func",
+                    "<my_crate::my_struct as my_crate::my_trait>::my_func");
+}
+
+TEST(DemangleRust, TraitImplWithDisambiguator) {
+  EXPECT_DEMANGLING("_RNvXs_C8my_crateNtB4_9my_structNtB4_8my_trait7my_func",
+                    "<my_crate::my_struct as my_crate::my_trait>::my_func");
+}
+
+TEST(DemangleRust, TraitImplWithNonpathSelfType) {
+  EXPECT_DEMANGLING("_RNvXC8my_crateRlNtB2_8my_trait7my_func",
+                    "<&i32 as my_crate::my_trait>::my_func");
+}
+
+TEST(DemangleRust, ThunkType) {
+  EXPECT_DEMANGLING("_RNvYFEuNtC1c1t1f",  // <fn() as c::t>::f
+                    "<fn... as c::t>::f");
+}
+
+TEST(DemangleRust, NontrivialFunctionReturnType) {
+  EXPECT_DEMANGLING(
+      "_RNvYFERTlmENtC1c1t1f",  // <fn() -> &(i32, u32) as c::t>::f
+      "<fn... as c::t>::f");
+}
+
+TEST(DemangleRust, OneParameterType) {
+  EXPECT_DEMANGLING("_RNvYFlEuNtC1c1t1f",  // <fn(i32) as c::t>::f
+                    "<fn... as c::t>::f");
+}
+
+TEST(DemangleRust, TwoParameterTypes) {
+  EXPECT_DEMANGLING("_RNvYFlmEuNtC1c1t1f",  // <fn(i32, u32) as c::t>::f
+                    "<fn... as c::t>::f");
+}
+
+TEST(DemangleRust, ExternC) {
+  EXPECT_DEMANGLING("_RNvYFKCEuNtC1c1t1f",  // <extern "C" fn() as c::t>>::f
+                    "<fn... as c::t>::f");
+}
+
+TEST(DemangleRust, ExternOther) {
+  EXPECT_DEMANGLING(
+      "_RNvYFK5not_CEuNtC1c1t1f",  // <extern "not-C" fn() as c::t>::f
+      "<fn... as c::t>::f");
+}
+
+TEST(DemangleRust, Unsafe) {
+  EXPECT_DEMANGLING("_RNvYFUEuNtC1c1t1f",  // <unsafe fn() as c::t>::f
+                    "<fn... as c::t>::f");
+}
+
+TEST(DemangleRust, Binder) {
+  EXPECT_DEMANGLING(
+      // <for<'a> fn(&'a i32) -> &'a i32 as c::t>::f
+      "_RNvYFG_RL0_lEB5_NtC1c1t1f",
+      "<fn... as c::t>::f");
+}
+
+TEST(DemangleRust, AllFnSigFeaturesInOrder) {
+  EXPECT_DEMANGLING(
+      // <for<'a> unsafe extern "C" fn(&'a i32) -> &'a i32 as c::t>::f
+      "_RNvYFG_UKCRL0_lEB8_NtC1c1t1f",
+      "<fn... as c::t>::f");
+}
+
+TEST(DemangleRust, LifetimeInGenericArgs) {
+  EXPECT_DEMANGLING("_RINvC1c1fINtB2_1sL_EE",  // c::f::<c::s::<'_>>
+                    "c::f::<>");
+}
+
+TEST(DemangleRust, EmptyDynTrait) {
+  // This shouldn't happen, but the grammar allows it and existing demanglers
+  // accept it.
+  EXPECT_DEMANGLING("_RNvYDEL_NtC1c1t1f",
+                    "<dyn  as c::t>::f");
+}
+
+TEST(DemangleRust, SimpleDynTrait) {
+  EXPECT_DEMANGLING("_RNvYDNtC1c1tEL_NtC1d1u1f",
+                    "<dyn c::t as d::u>::f");
+}
+
+TEST(DemangleRust, DynTraitWithOneAssociatedType) {
+  EXPECT_DEMANGLING(
+      "_RNvYDNtC1c1tp1xlEL_NtC1d1u1f",  // <dyn c::t<x = i32> as d::u>::f
+      "<dyn c::t<> as d::u>::f");
+}
+
+TEST(DemangleRust, DynTraitWithTwoAssociatedTypes) {
+  EXPECT_DEMANGLING(
+      // <dyn c::t<x = i32, y = u32> as d::u>::f
+      "_RNvYDNtC1c1tp1xlp1ymEL_NtC1d1u1f",
+      "<dyn c::t<> as d::u>::f");
+}
+
+TEST(DemangleRust, DynTraitPlusAutoTrait) {
+  EXPECT_DEMANGLING(
+      "_RNvYDNtC1c1tNtNtC3std6marker4SendEL_NtC1d1u1f",
+      "<dyn c::t + std::marker::Send as d::u>::f");
+}
+
+TEST(DemangleRust, DynTraitPlusTwoAutoTraits) {
+  EXPECT_DEMANGLING(
+      "_RNvYDNtC1c1tNtNtC3std6marker4CopyNtBc_4SyncEL_NtC1d1u1f",
+      "<dyn c::t + std::marker::Copy + std::marker::Sync as d::u>::f");
+}
+
+TEST(DemangleRust, HigherRankedDynTrait) {
+  EXPECT_DEMANGLING(
+      // <dyn for<'a> c::t::<&'a i32> as d::u>::f
+      "_RNvYDG_INtC1c1tRL0_lEEL_NtC1d1u1f",
+      "<dyn c::t::<> as d::u>::f");
+}
+
+}  // namespace
+}  // namespace debugging_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/absl/debugging/internal/demangle_test.cc b/absl/debugging/internal/demangle_test.cc
index a16ab75..5579221 100644
--- a/absl/debugging/internal/demangle_test.cc
+++ b/absl/debugging/internal/demangle_test.cc
@@ -31,6 +31,477 @@
 
 using ::testing::ContainsRegex;
 
+TEST(Demangle, FunctionTemplate) {
+  char tmp[100];
+
+  // template <typename T>
+  // int foo(T);
+  //
+  // foo<int>(5);
+  ASSERT_TRUE(Demangle("_Z3fooIiEiT_", tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "foo<>()");
+}
+
+TEST(Demangle, FunctionTemplateWithNesting) {
+  char tmp[100];
+
+  // template <typename T>
+  // int foo(T);
+  //
+  // foo<Wrapper<int>>({ .value = 5 });
+  ASSERT_TRUE(Demangle("_Z3fooI7WrapperIiEEiT_", tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "foo<>()");
+}
+
+TEST(Demangle, FunctionTemplateWithNonTypeParamConstraint) {
+  char tmp[100];
+
+  // template <std::integral T>
+  // int foo(T);
+  //
+  // foo<int>(5);
+  ASSERT_TRUE(Demangle("_Z3fooITkSt8integraliEiT_", tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "foo<>()");
+}
+
+TEST(Demangle, FunctionTemplateWithFunctionRequiresClause) {
+  char tmp[100];
+
+  // template <typename T>
+  // int foo() requires std::integral<T>;
+  //
+  // foo<int>();
+  ASSERT_TRUE(Demangle("_Z3fooIiEivQsr3stdE8integralIT_E", tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "foo<>()");
+}
+
+TEST(Demangle, FunctionWithTemplateParamRequiresClause) {
+  char tmp[100];
+
+  // template <typename T>
+  //     requires std::integral<T>
+  // int foo();
+  //
+  // foo<int>();
+  ASSERT_TRUE(Demangle("_Z3fooIiQsr3stdE8integralIT_EEiv", tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "foo<>()");
+}
+
+TEST(Demangle, FunctionWithTemplateParamAndFunctionRequiresClauses) {
+  char tmp[100];
+
+  // template <typename T>
+  //     requires std::integral<T>
+  // int foo() requires std::integral<T>;
+  //
+  // foo<int>();
+  ASSERT_TRUE(Demangle("_Z3fooIiQsr3stdE8integralIT_EEivQsr3stdE8integralIS0_E",
+                       tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "foo<>()");
+}
+
+TEST(Demangle, FunctionTemplateBacktracksOnMalformedRequiresClause) {
+  char tmp[100];
+
+  // template <typename T>
+  // int foo(T);
+  //
+  // foo<int>(5);
+  // Except there's an extra `Q` where the mangled requires clause would be.
+  ASSERT_FALSE(Demangle("_Z3fooIiQEiT_", tmp, sizeof(tmp)));
+}
+
+TEST(Demangle, FunctionTemplateWithAutoParam) {
+  char tmp[100];
+
+  // template <auto>
+  // void foo();
+  //
+  // foo<1>();
+  ASSERT_TRUE(Demangle("_Z3fooITnDaLi1EEvv", tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "foo<>()");
+}
+
+TEST(Demangle, FunctionTemplateWithNonTypeParamPack) {
+  char tmp[100];
+
+  // template <int&..., typename T>
+  // void foo(T);
+  //
+  // foo(2);
+  ASSERT_TRUE(Demangle("_Z3fooITpTnRiJEiEvT0_", tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "foo<>()");
+}
+
+TEST(Demangle, FunctionTemplateTemplateParamWithConstrainedArg) {
+  char tmp[100];
+
+  // template <typename T>
+  // concept True = true;
+  //
+  // template <typename T> requires True<T>
+  // struct Fooer {};
+  //
+  // template <template <typename T> typename>
+  // void foo() {}
+  //
+  // foo<Fooer>();
+  ASSERT_TRUE(Demangle("_Z3fooITtTyE5FooerEvv", tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "foo<>()");
+}
+
+TEST(Demangle, ConstrainedAutoInFunctionTemplate) {
+  char tmp[100];
+
+  // template <typename T> concept C = true;
+  // template <C auto N> void f() {}
+  // template void f<0>();
+  ASSERT_TRUE(Demangle("_Z1fITnDk1CLi0EEvv", tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "f<>()");
+}
+
+TEST(Demangle, ConstrainedFriendFunctionTemplate) {
+  char tmp[100];
+
+  // Source:
+  //
+  // namespace ns {
+  // template <class T> struct Y {
+  //   friend void y(Y) requires true {}
+  // };
+  // }  // namespace ns
+  //
+  // y(ns::Y<int>{});
+  //
+  // LLVM demangling:
+  //
+  // ns::Y<int>::friend y(ns::Y<int>) requires true
+  ASSERT_TRUE(Demangle("_ZN2ns1YIiEF1yES1_QLb1E", tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "ns::Y<>::friend y()");
+}
+
+TEST(Demangle, ConstrainedFriendOperatorTemplate) {
+  char tmp[100];
+
+  // ns::Y<int>::friend operator*(ns::Y<int>) requires true
+  ASSERT_TRUE(Demangle("_ZN2ns1YIiEFdeES1_QLb1E", tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "ns::Y<>::friend operator*()");
+}
+
+TEST(Demangle, NonTemplateBuiltinType) {
+  char tmp[100];
+
+  // void foo(__my_builtin_type t);
+  //
+  // foo({});
+  ASSERT_TRUE(Demangle("_Z3foou17__my_builtin_type", tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "foo()");
+}
+
+TEST(Demangle, SingleArgTemplateBuiltinType) {
+  char tmp[100];
+
+  // template <typename T>
+  // __my_builtin_type<T> foo();
+  //
+  // foo<int>();
+  ASSERT_TRUE(Demangle("_Z3fooIiEu17__my_builtin_typeIT_Ev", tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "foo<>()");
+}
+
+TEST(Demangle, TwoArgTemplateBuiltinType) {
+  char tmp[100];
+
+  // template <typename T, typename U>
+  // __my_builtin_type<T, U> foo();
+  //
+  // foo<int, char>();
+  ASSERT_TRUE(
+      Demangle("_Z3fooIicEu17__my_builtin_typeIT_T0_Ev", tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "foo<>()");
+}
+
+TEST(Demangle, TypeNestedUnderTemplatedBuiltinType) {
+  char tmp[100];
+
+  // Source:
+  //
+  // template <typename T>
+  // typename std::remove_reference_t<T>::type f(T t);
+  //
+  // struct C { using type = C; };
+  //
+  // f<const C&>(C{});
+  //
+  // These days std::remove_reference_t is implemented in terms of a vendor
+  // builtin __remove_reference_t.  A full demangling might look like:
+  //
+  // __remove_reference_t<C const&>::type f<C const&>(C const&)
+  ASSERT_TRUE(Demangle("_Z1fIRK1CENu20__remove_reference_tIT_E4typeES3_",
+                       tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, TemplateTemplateParamSubstitution) {
+  char tmp[100];
+
+  // template <typename T>
+  // concept True = true;
+  //
+  // template<std::integral T, T> struct Foolable {};
+  // template<template<typename T, T> typename> void foo() {}
+  //
+  // template void foo<Foolable>();
+  ASSERT_TRUE(Demangle("_Z3fooITtTyTnTL0__E8FoolableEvv", tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "foo<>()");
+}
+
+TEST(Demangle, TemplateParamSubstitutionWithGenericLambda) {
+  char tmp[100];
+
+  // template <typename>
+  // struct Fooer {
+  //     template <typename>
+  //     void foo(decltype([](auto x, auto y) {})) {}
+  // };
+  //
+  // Fooer<int> f;
+  // f.foo<int>({});
+  ASSERT_TRUE(
+      Demangle("_ZN5FooerIiE3fooIiEEvNS0_UlTL0__TL0_0_E_E", tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "Fooer<>::foo<>()");
+}
+
+TEST(Demangle, LambdaRequiresTrue) {
+  char tmp[100];
+
+  // auto $_0::operator()<int>(int) const requires true
+  ASSERT_TRUE(Demangle("_ZNK3$_0clIiEEDaT_QLb1E", tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "$_0::operator()<>()");
+}
+
+TEST(Demangle, LambdaRequiresSimpleExpression) {
+  char tmp[100];
+
+  // auto $_0::operator()<int>(int) const requires 2 + 2 == 4
+  ASSERT_TRUE(Demangle("_ZNK3$_0clIiEEDaT_QeqplLi2ELi2ELi4E",
+                       tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "$_0::operator()<>()");
+}
+
+TEST(Demangle, LambdaRequiresRequiresExpressionContainingTrue) {
+  char tmp[100];
+
+  // auto $_0::operator()<int>(int) const requires requires { true; }
+  ASSERT_TRUE(Demangle("_ZNK3$_0clIiEEDaT_QrqXLb1EE", tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "$_0::operator()<>()");
+}
+
+TEST(Demangle, LambdaRequiresRequiresExpressionContainingConcept) {
+  char tmp[100];
+
+  // auto $_0::operator()<int>(int) const
+  // requires requires { std::same_as<decltype(fp), int>; }
+  ASSERT_TRUE(Demangle("_ZNK3$_0clIiEEDaT_QrqXsr3stdE7same_asIDtfp_EiEE",
+                       tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "$_0::operator()<>()");
+}
+
+TEST(Demangle, LambdaRequiresRequiresExpressionContainingNoexceptExpression) {
+  char tmp[100];
+
+  // auto $_0::operator()<int>(int) const
+  // requires requires { {fp + fp} noexcept; }
+  ASSERT_TRUE(Demangle("_ZNK3$_0clIiEEDaT_QrqXplfp_fp_NE", tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "$_0::operator()<>()");
+}
+
+TEST(Demangle, LambdaRequiresRequiresExpressionContainingReturnTypeConstraint) {
+  char tmp[100];
+
+  // auto $_0::operator()<int>(int) const
+  // requires requires { {fp + fp} -> std::same_as<decltype(fp)>; }
+  ASSERT_TRUE(Demangle("_ZNK3$_0clIiEEDaT_QrqXplfp_fp_RNSt7same_asIDtfp_EEEE",
+                       tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "$_0::operator()<>()");
+}
+
+TEST(Demangle, LambdaRequiresRequiresExpressionWithBothNoexceptAndReturnType) {
+  char tmp[100];
+
+  // auto $_0::operator()<int>(int) const
+  // requires requires { {fp + fp} noexcept -> std::same_as<decltype(fp)>; }
+  ASSERT_TRUE(Demangle("_ZNK3$_0clIiEEDaT_QrqXplfp_fp_NRNSt7same_asIDtfp_EEEE",
+                       tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "$_0::operator()<>()");
+}
+
+TEST(Demangle, LambdaRequiresRequiresExpressionContainingType) {
+  char tmp[100];
+
+  // auto $_0::operator()<S>(S) const
+  // requires requires { typename S::T; }
+  ASSERT_TRUE(Demangle("_ZNK3$_0clI1SEEDaT_QrqTNS2_1TEE", tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "$_0::operator()<>()");
+}
+
+TEST(Demangle, LambdaRequiresRequiresExpressionNestingAnotherRequires) {
+  char tmp[100];
+
+  // auto $_0::operator()<int>(int) const requires requires { requires true; }
+  ASSERT_TRUE(Demangle("_ZNK3$_0clIiEEDaT_QrqQLb1EE", tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "$_0::operator()<>()");
+}
+
+TEST(Demangle, LambdaRequiresRequiresExpressionContainingTwoRequirements) {
+  char tmp[100];
+
+  // auto $_0::operator()<int>(int) const
+  // requires requires { requires true; requires 2 + 2 == 4; }
+  ASSERT_TRUE(Demangle("_ZNK3$_0clIiEEDaT_QrqXLb1EXeqplLi2ELi2ELi4EE",
+                       tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "$_0::operator()<>()");
+}
+
+TEST(Demangle, RequiresExpressionWithItsOwnParameter) {
+  char tmp[100];
+
+  // S<requires (int) { fp + fp; }> f<int>(int)
+  ASSERT_TRUE(Demangle("_Z1fIiE1SIXrQT__XplfL0p_fp_EEES1_", tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "f<>()");
+}
+
+TEST(Demangle, LambdaWithExplicitTypeArgument) {
+  char tmp[100];
+
+  // Source:
+  //
+  // template <class T> T f(T t) {
+  //   return []<class U>(U u) { return u + u; }(t);
+  // }
+  //
+  // template int f<int>(int);
+  //
+  // Full LLVM demangling of the lambda call operator:
+  //
+  // auto int f<int>(int)::'lambda'<typename $T>(int)::
+  // operator()<int>(int) const
+  ASSERT_TRUE(Demangle("_ZZ1fIiET_S0_ENKUlTyS0_E_clIiEEDaS0_",
+                       tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "f<>()::{lambda()#1}::operator()<>()");
+}
+
+TEST(Demangle, LambdaWithExplicitPackArgument) {
+  char tmp[100];
+
+  // Source:
+  //
+  // template <class T> T f(T t) {
+  //   return []<class... U>(U... u) {
+  //     return ((u + u) + ... + 0);
+  //   }(t);
+  // }
+  //
+  // template int f<int>(int);
+  //
+  // Full LLVM demangling of the lambda call operator:
+  //
+  // auto int f<int>(int)::'lambda'<typename ...$T>($T...)::
+  // operator()<int>($T...) const
+  ASSERT_TRUE(Demangle("_ZZ1fIiET_S0_ENKUlTpTyDpT_E_clIJiEEEDaS2_",
+                       tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "f<>()::{lambda()#1}::operator()<>()");
+}
+
+TEST(Demangle, LambdaInClassMemberDefaultArgument) {
+  char tmp[100];
+
+  // Source:
+  //
+  // struct S {
+  //   static auto f(void (*g)() = [] {}) { return g; }
+  // };
+  // void (*p)() = S::f();
+  //
+  // Full LLVM demangling of the lambda call operator:
+  //
+  // S::f(void (*)())::'lambda'()::operator()() const
+  //
+  // Full GNU binutils demangling:
+  //
+  // S::f(void (*)())::{default arg#1}::{lambda()#1}::operator()() const
+  ASSERT_TRUE(Demangle("_ZZN1S1fEPFvvEEd_NKUlvE_clEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "S::f()::{default arg#1}::{lambda()#1}::operator()()");
+
+  // The same but in the second rightmost default argument.
+  ASSERT_TRUE(Demangle("_ZZN1S1fEPFvvEEd0_NKUlvE_clEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "S::f()::{default arg#2}::{lambda()#1}::operator()()");
+
+  // Reject negative <(parameter) number> values.
+  ASSERT_FALSE(Demangle("_ZZN1S1fEPFvvEEdn1_NKUlvE_clEv", tmp, sizeof(tmp)));
+}
+
+TEST(Demangle, AvoidSignedOverflowForUnfortunateParameterNumbers) {
+  char tmp[100];
+
+  // Here <number> + 2 fits in an int, but just barely.  (We expect no such
+  // input in practice: real functions don't have billions of arguments.)
+  ASSERT_TRUE(Demangle("_ZZN1S1fEPFvvEEd2147483645_NKUlvE_clEv",
+                       tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp,
+               "S::f()::{default arg#2147483647}::{lambda()#1}::operator()()");
+
+  // Now <number> is an int, but <number> + 2 is not.
+  ASSERT_TRUE(Demangle("_ZZN1S1fEPFvvEEd2147483646_NKUlvE_clEv",
+                       tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "S::f()::{default arg#1}::{lambda()#1}::operator()()");
+
+  // <number> is the largest int.
+  ASSERT_TRUE(Demangle("_ZZN1S1fEPFvvEEd2147483647_NKUlvE_clEv",
+                       tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "S::f()::{default arg#1}::{lambda()#1}::operator()()");
+
+  // <number> itself does not fit into an int.  ParseNumber truncates the value
+  // to int, yielding a large negative number, which we strain out.
+  ASSERT_TRUE(Demangle("_ZZN1S1fEPFvvEEd2147483648_NKUlvE_clEv",
+                       tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "S::f()::{default arg#1}::{lambda()#1}::operator()()");
+}
+
+TEST(Demangle, SubstpackNotationForTroublesomeTemplatePack) {
+  char tmp[100];
+
+  // Source:
+  //
+  // template <template <class> class, template <class> class> struct B {};
+  //
+  // template <template <class> class... T> struct A {
+  //   template <template <class> class... U> void f(B<T, U>&&...) {}
+  // };
+  //
+  // template void A<>::f<>();
+  //
+  // LLVM can't demangle its own _SUBSTPACK_ notation.
+  ASSERT_TRUE(Demangle("_ZN1AIJEE1fIJEEEvDpO1BI_SUBSTPACK_T_E",
+                       tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "A<>::f<>()");
+}
+
+TEST(Demangle, TemplateTemplateParamAppearingAsBackrefFollowedByTemplateArgs) {
+  char tmp[100];
+
+  // Source:
+  //
+  // template <template <class> class C> struct W {
+  //   template <class T> static decltype(C<T>::m()) f() { return {}; }
+  // };
+  //
+  // template <class T> struct S { static int m() { return 0; } };
+  // template decltype(S<int>::m()) W<S>::f<int>();
+  ASSERT_TRUE(Demangle("_ZN1WI1SE1fIiEEDTclsrS0_IT_EE1mEEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ(tmp, "W<>::f<>()");
+}
+
 // Test corner cases of boundary conditions.
 TEST(Demangle, CornerCases) {
   char tmp[10];
@@ -95,6 +566,250 @@
   EXPECT_FALSE(Demangle("_ZL3Foov.isra.2.constprop.", tmp, sizeof(tmp)));
 }
 
+TEST(Demangle, Discriminators) {
+  char tmp[80];
+
+  // Source:
+  //
+  // using Thunk = void (*)();
+  //
+  // Thunk* f() {
+  //   static Thunk thunks[12] = {};
+  //
+  // #define THUNK(i) [backslash here]
+  //   do { struct S { static void g() {} }; thunks[i] = &S::g; } while (0)
+  //
+  //   THUNK(0);
+  //   [... repeat for 1 to 10 ...]
+  //   THUNK(11);
+  //
+  //   return thunks;
+  // }
+  //
+  // The test inputs are manglings of some of the S::g member functions.
+
+  // The first one omits the discriminator.
+  EXPECT_TRUE(Demangle("_ZZ1fvEN1S1gEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f()::S::g()", tmp);
+
+  // The second one encodes 0.
+  EXPECT_TRUE(Demangle("_ZZ1fvEN1S1gE_0v", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f()::S::g()", tmp);
+
+  // The eleventh one encodes 9.
+  EXPECT_TRUE(Demangle("_ZZ1fvEN1S1gE_9v", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f()::S::g()", tmp);
+
+  // The twelfth one encodes 10 with extra underscores delimiting it.
+  EXPECT_TRUE(Demangle("_ZZ1fvEN1S1gE__10_v", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f()::S::g()", tmp);
+}
+
+TEST(Demangle, SingleDigitDiscriminatorFollowedByADigit) {
+  char tmp[80];
+
+  // Don't parse 911 as a number.
+  EXPECT_TRUE(Demangle("_ZZ1fvEN1S1gE_911return_type", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f()::S::g()", tmp);
+}
+
+TEST(Demangle, LiteralOfGlobalNamespaceEnumType) {
+  char tmp[80];
+
+  // void f<(E)42>()
+  EXPECT_TRUE(Demangle("_Z1fIL1E42EEvv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, NullptrLiterals) {
+  char tmp[80];
+
+  // void f<nullptr>()
+  EXPECT_TRUE(Demangle("_Z1fILDnEEvv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+
+  // also void f<nullptr>()
+  EXPECT_TRUE(Demangle("_Z1fILDn0EEvv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, StringLiterals) {
+  char tmp[80];
+
+  // void f<"<char const [42]>">()
+  EXPECT_TRUE(Demangle("_Z1fILA42_KcEEvv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, ComplexFloatingPointLiterals) {
+  char tmp[80];
+
+  // Source (use g++ -fext-numeric-literals to compile):
+  //
+  // using C = double _Complex;
+  // template <class T> void f(char (&)[sizeof(C{sizeof(T)} + 4.0j)]) {}
+  // template void f<int>(char (&)[sizeof(C{sizeof(int)} + 4.0j)]);
+  //
+  // GNU demangling:
+  //
+  // void f<int>(char (&) [sizeof (double _Complex{sizeof (int)}+
+  // ((double _Complex)0000000000000000_4010000000000000))])
+  EXPECT_TRUE(Demangle(
+      "_Z1fIiEvRAszpltlCdstT_ELS0_0000000000000000_4010000000000000E_c",
+      tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, Float128) {
+  char tmp[80];
+
+  // S::operator _Float128() const
+  EXPECT_TRUE(Demangle("_ZNK1ScvDF128_Ev", tmp, sizeof(tmp)));
+  EXPECT_STREQ("S::operator _Float128()", tmp);
+}
+
+TEST(Demangle, Float128x) {
+  char tmp[80];
+
+  // S::operator _Float128x() const
+  EXPECT_TRUE(Demangle("_ZNK1ScvDF128xEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("S::operator _Float128x()", tmp);
+}
+
+TEST(Demangle, Bfloat16) {
+  char tmp[80];
+
+  // S::operator std::bfloat16_t() const
+  EXPECT_TRUE(Demangle("_ZNK1ScvDF16bEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("S::operator std::bfloat16_t()", tmp);
+}
+
+TEST(Demangle, SimpleSignedBitInt) {
+  char tmp[80];
+
+  // S::operator _BitInt(256)() const
+  EXPECT_TRUE(Demangle("_ZNK1ScvDB256_Ev", tmp, sizeof(tmp)));
+  EXPECT_STREQ("S::operator _BitInt(256)()", tmp);
+}
+
+TEST(Demangle, SimpleUnsignedBitInt) {
+  char tmp[80];
+
+  // S::operator unsigned _BitInt(256)() const
+  EXPECT_TRUE(Demangle("_ZNK1ScvDU256_Ev", tmp, sizeof(tmp)));
+  EXPECT_STREQ("S::operator unsigned _BitInt(256)()", tmp);
+}
+
+TEST(Demangle, DependentBitInt) {
+  char tmp[80];
+
+  // S::operator _BitInt(256)<256>() const
+  EXPECT_TRUE(Demangle("_ZNK1ScvDBT__ILi256EEEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("S::operator _BitInt(?)<>()", tmp);
+}
+
+TEST(Demangle, ConversionToPointerType) {
+  char tmp[80];
+
+  // S::operator int*() const
+  EXPECT_TRUE(Demangle("_ZNK1ScvPiEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("S::operator int*()", tmp);
+}
+
+TEST(Demangle, ConversionToLvalueReferenceType) {
+  char tmp[80];
+
+  // S::operator int&() const
+  EXPECT_TRUE(Demangle("_ZNK1ScvRiEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("S::operator int&()", tmp);
+}
+
+TEST(Demangle, ConversionToRvalueReferenceType) {
+  char tmp[80];
+
+  // S::operator int&&() const
+  EXPECT_TRUE(Demangle("_ZNK1ScvOiEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("S::operator int&&()", tmp);
+}
+
+TEST(Demangle, ConversionToComplexFloatingPointType) {
+  char tmp[80];
+
+  // S::operator float _Complex() const
+  EXPECT_TRUE(Demangle("_ZNK1ScvCfEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("S::operator float _Complex()", tmp);
+}
+
+TEST(Demangle, ConversionToImaginaryFloatingPointType) {
+  char tmp[80];
+
+  // S::operator float _Imaginary() const
+  EXPECT_TRUE(Demangle("_ZNK1ScvGfEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("S::operator float _Imaginary()", tmp);
+}
+
+TEST(Demangle, ConversionToPointerToCvQualifiedType) {
+  char tmp[80];
+
+  // S::operator int const volatile restrict*() const
+  EXPECT_TRUE(Demangle("_ZNK1ScvPrVKiEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("S::operator int const volatile restrict*()", tmp);
+}
+
+TEST(Demangle, ConversionToLayeredPointerType) {
+  char tmp[80];
+
+  // S::operator int const* const*() const
+  EXPECT_TRUE(Demangle("_ZNK1ScvPKPKiEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("S::operator int const* const*()", tmp);
+}
+
+TEST(Demangle, ConversionToTypeWithExtendedQualifier) {
+  char tmp[80];
+
+  // S::operator int const AS128*() const
+  //
+  // Because our scan of easy type constructors stops at the extended qualifier,
+  // the demangling preserves the * but loses the const.
+  EXPECT_TRUE(Demangle("_ZNK1ScvPU5AS128KiEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("S::operator int*()", tmp);
+}
+
+TEST(Demangle, GlobalInitializers) {
+  char tmp[80];
+
+  // old form without suffix
+  EXPECT_TRUE(Demangle("_ZGR1v", tmp, sizeof(tmp)));
+  EXPECT_STREQ("reference temporary for v", tmp);
+
+  // modern form for the whole initializer
+  EXPECT_TRUE(Demangle("_ZGR1v_", tmp, sizeof(tmp)));
+  EXPECT_STREQ("reference temporary for v", tmp);
+
+  // next subobject in depth-first preorder traversal
+  EXPECT_TRUE(Demangle("_ZGR1v0_", tmp, sizeof(tmp)));
+  EXPECT_STREQ("reference temporary for v", tmp);
+
+  // subobject with a larger seq-id
+  EXPECT_TRUE(Demangle("_ZGR1v1Z_", tmp, sizeof(tmp)));
+  EXPECT_STREQ("reference temporary for v", tmp);
+}
+
+TEST(Demangle, StructuredBindings) {
+  char tmp[80];
+
+  // Source:
+  //
+  // struct S { int a, b; };
+  // const auto& [x, y] = S{1, 2};
+
+  // [x, y]
+  EXPECT_TRUE(Demangle("_ZDC1x1yE", tmp, sizeof(tmp)));
+
+  // reference temporary for [x, y]
+  EXPECT_TRUE(Demangle("_ZGRDC1x1yE_", tmp, sizeof(tmp)));
+}
+
 // Test the GNU abi_tag extension.
 TEST(Demangle, AbiTags) {
   char tmp[80];
@@ -119,6 +834,1078 @@
   EXPECT_STREQ("C[abi:bar][abi:foo]()", tmp);
 }
 
+TEST(Demangle, SimpleGnuVectorSize) {
+  char tmp[80];
+
+  // Source:
+  //
+  // #define VECTOR(size) __attribute__((vector_size(size)))
+  // void f(int x VECTOR(32)) {}
+  //
+  // The attribute's size is a number of bytes.  The compiler verifies that this
+  // value corresponds to a whole number of elements and emits the number of
+  // elements as a <number> in the mangling.  With sizeof(int) == 4, that yields
+  // 32/4 = 8.
+  //
+  // LLVM demangling:
+  //
+  // f(int vector[8])
+  EXPECT_TRUE(Demangle("_Z1fDv8_i", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f()", tmp);
+}
+
+TEST(Demangle, GnuVectorSizeIsATemplateParameter) {
+  char tmp[80];
+
+  // Source:
+  //
+  // #define VECTOR(size) __attribute__((vector_size(size)))
+  // template <int n> void f(int x VECTOR(n)) {}
+  // template void f<32>(int x VECTOR(32));
+  //
+  // LLVM demangling:
+  //
+  // void f<32>(int vector[32])
+  //
+  // Because the size was dependent on a template parameter, it was encoded
+  // using the general expression encoding.  Nothing in the mangling says how
+  // big the element type is, so the demangler is unable to show the element
+  // count 8 instead of the byte count 32.  Arguably it would have been better
+  // to make the narrow production encode the byte count, so that nondependent
+  // and dependent versions of a 32-byte vector would both come out as
+  // vector[32].
+  EXPECT_TRUE(Demangle("_Z1fILi32EEvDvT__i", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, GnuVectorSizeIsADependentOperatorExpression) {
+  char tmp[80];
+
+  // Source:
+  //
+  // #define VECTOR(size) __attribute__((vector_size(size)))
+  // template <int n> void f(int x VECTOR(2 * n)) {}
+  // template void f<32>(int x VECTOR(2 * 32));
+  //
+  // LLVM demangling:
+  //
+  // void f<32>(int vector[2 * 32])
+  EXPECT_TRUE(Demangle("_Z1fILi32EEvDvmlLi2ET__i", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, SimpleAddressSpace) {
+  char tmp[80];
+
+  // Source:
+  //
+  // void f(const int __attribute__((address_space(128)))*) {}
+  //
+  // LLVM demangling:
+  //
+  // f(int const AS128*)
+  //
+  // Itanium ABI 5.1.5.1, "Qualified types", notes that address_space is mangled
+  // nonuniformly as a legacy exception: the number is part of the source-name
+  // if nondependent but is an expression in template-args if dependent.  Thus
+  // it is a convenient test case for both forms.
+  EXPECT_TRUE(Demangle("_Z1fPU5AS128Ki", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f()", tmp);
+}
+
+TEST(Demangle, DependentAddressSpace) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <int n> void f (const int __attribute__((address_space(n)))*) {}
+  // template void f<128>(const int __attribute__((address_space(128)))*);
+  //
+  // LLVM demangling:
+  //
+  // void f<128>(int AS<128>*)
+  EXPECT_TRUE(Demangle("_Z1fILi128EEvPU2ASIT_Ei", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, TransactionSafeEntryPoint) {
+  char tmp[80];
+
+  EXPECT_TRUE(Demangle("_ZGTt1fv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("transaction clone for f()", tmp);
+}
+
+TEST(Demangle, TransactionSafeFunctionType) {
+  char tmp[80];
+
+  // GNU demangling: f(void (*)() transaction_safe)
+  EXPECT_TRUE(Demangle("_Z1fPDxFvvE", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f()", tmp);
+}
+
+TEST(Demangle, TemplateParameterObject) {
+  char tmp[80];
+
+  // Source:
+  //
+  // struct S { int x, y; };
+  // template <S s, const S* p = &s> void f() {}
+  // template void f<S{1, 2}>();
+  //
+  // LLVM demangling:
+  //
+  // void f<S{1, 2}, &template parameter object for S{1, 2}>()
+  EXPECT_TRUE(Demangle("_Z1fIXtl1SLi1ELi2EEEXadL_ZTAXtlS0_Li1ELi2EEEEEEvv",
+                       tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+
+  // The name of the object standing alone.
+  //
+  // LLVM demangling: template parameter object for S{1, 2}
+  EXPECT_TRUE(Demangle("_ZTAXtl1SLi1ELi2EEE", tmp, sizeof(tmp)));
+  EXPECT_STREQ("template parameter object", tmp);
+}
+
+TEST(Demangle, EnableIfAttributeOnGlobalFunction) {
+  char tmp[80];
+
+  // int f(long l) __attribute__((enable_if(l >= 0, ""))) { return l; }
+  //
+  // f(long) [enable_if:fp >= 0]
+  EXPECT_TRUE(Demangle("_Z1fUa9enable_ifIXgefL0p_Li0EEEl", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f()", tmp);
+}
+
+TEST(Demangle, EnableIfAttributeOnNamespaceScopeFunction) {
+  char tmp[80];
+
+  // namespace ns {
+  // int f(long l) __attribute__((enable_if(l >= 0, ""))) { return l; }
+  // }  // namespace ns
+  //
+  // ns::f(long) [enable_if:fp >= 0]
+  EXPECT_TRUE(Demangle("_ZN2ns1fEUa9enable_ifIXgefL0p_Li0EEEl",
+              tmp, sizeof(tmp)));
+  EXPECT_STREQ("ns::f()", tmp);
+}
+
+TEST(Demangle, EnableIfAttributeOnFunctionTemplate) {
+  char tmp[80];
+
+  // template <class T>
+  // T f(T t) __attribute__((enable_if(t >= T{}, ""))) { return t; }
+  // template int f<int>(int);
+  //
+  // int f<int>(int) [enable_if:fp >= int{}]
+  EXPECT_TRUE(Demangle("_Z1fIiEUa9enable_ifIXgefL0p_tliEEET_S0_",
+              tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, ThisPointerInDependentSignature) {
+  char tmp[80];
+
+  // decltype(g<int>(this)) S::f<int>()
+  EXPECT_TRUE(Demangle("_ZN1S1fIiEEDTcl1gIT_EfpTEEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("S::f<>()", tmp);
+}
+
+TEST(Demangle, DependentMemberOperatorCall) {
+  char tmp[80];
+
+  // decltype(fp.operator()()) f<C>(C)
+  EXPECT_TRUE(Demangle("_Z1fI1CEDTcldtfp_onclEET_", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, TypeNestedUnderDecltype) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <class T> struct S { using t = int; };
+  // template <class T> decltype(S<T>{})::t f() { return {}; }
+  // void g() { f<int>(); }
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // decltype(S<int>{})::t f<int>()
+  EXPECT_TRUE(Demangle("_Z1fIiENDTtl1SIT_EEE1tEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, ElaboratedTypes) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <class T> struct S { class C {}; };
+  // template <class T> void f(class S<T>::C) {}
+  // template void f<int>(class S<int>::C);
+  //
+  // LLVM demangling:
+  //
+  // void f<int>(struct S<int>::C)
+  EXPECT_TRUE(Demangle("_Z1fIiEvTsN1SIT_E1CE", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+
+  // The like for unions.
+  EXPECT_TRUE(Demangle("_Z1fIiEvTuN1SIT_E1CE", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+
+  // The like for enums.
+  EXPECT_TRUE(Demangle("_Z1fIiEvTeN1SIT_E1CE", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+// Test subobject-address template parameters.
+TEST(Demangle, SubobjectAddresses) {
+  char tmp[80];
+
+  // void f<a.<char const at offset 123>>()
+  EXPECT_TRUE(Demangle("_Z1fIXsoKcL_Z1aE123EEEvv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+
+  // void f<&a.<char const at offset 0>>()
+  EXPECT_TRUE(Demangle("_Z1fIXadsoKcL_Z1aEEEEvv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+
+  // void f<&a.<char const at offset 123>>()
+  EXPECT_TRUE(Demangle("_Z1fIXadsoKcL_Z1aE123EEEvv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+
+  // void f<&a.<char const at offset 123>>(), past the end this time
+  EXPECT_TRUE(Demangle("_Z1fIXadsoKcL_Z1aE123pEEEvv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+
+  // void f<&a.<char const at offset 0>>() with union-selectors
+  EXPECT_TRUE(Demangle("_Z1fIXadsoKcL_Z1aE__1_234EEEvv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+
+  // void f<&a.<char const at offset 123>>(), past the end, with union-selector
+  EXPECT_TRUE(Demangle("_Z1fIXadsoKcL_Z1aE123_456pEEEvv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, Preincrement) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <class T> auto f(T t) -> decltype(T{++t}) { return t; }
+  // template auto f<int>(int t) -> decltype(int{++t});
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // decltype(int{++fp}) f<int>(int)
+  EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_pp_fp_EES0_", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, Postincrement) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <class T> auto f(T t) -> decltype(T{t++}) { return t; }
+  // template auto f<int>(int t) -> decltype(int{t++});
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // decltype(int{fp++}) f<int>(int)
+  EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_ppfp_EES0_", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, Predecrement) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <class T> auto f(T t) -> decltype(T{--t}) { return t; }
+  // template auto f<int>(int t) -> decltype(int{--t});
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // decltype(int{--fp}) f<int>(int)
+  EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_mm_fp_EES0_", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, Postdecrement) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <class T> auto f(T t) -> decltype(T{t--}) { return t; }
+  // template auto f<int>(int t) -> decltype(int{t--});
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // decltype(int{fp--}) f<int>(int)
+  EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_mmfp_EES0_", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, UnaryFoldExpressions) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <bool b> struct S {};
+  //
+  // template <class... T> auto f(T... t) -> S<((sizeof(T) == 4) || ...)> {
+  //   return {};
+  // }
+  //
+  // void g() { f(1, 2L); }
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // S<((sizeof (int) == 4, sizeof (long) == 4) || ...)> f<int, long>(int, long)
+  EXPECT_TRUE(Demangle("_Z1fIJilEE1SIXfrooeqstT_Li4EEEDpS1_",
+                       tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+
+  // The like with a left fold.
+  //
+  // S<(... || (sizeof (int) == 4, sizeof (long) == 4))> f<int, long>(int, long)
+  EXPECT_TRUE(Demangle("_Z1fIJilEE1SIXflooeqstT_Li4EEEDpS1_",
+                       tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, BinaryFoldExpressions) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <bool b> struct S {};
+  //
+  // template <class... T> auto f(T... t)
+  //     -> S<((sizeof(T) == 4) || ... || false)> {
+  //   return {};
+  // }
+  //
+  // void g() { f(1, 2L); }
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // S<((sizeof (int) == 4, sizeof (long) == 4) || ... || false)>
+  // f<int, long>(int, long)
+  EXPECT_TRUE(Demangle("_Z1fIJilEE1SIXfRooeqstT_Li4ELb0EEEDpS1_",
+                       tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+
+  // The like with a left fold.
+  //
+  // S<(false || ... || (sizeof (int) == 4, sizeof (long) == 4))>
+  // f<int, long>(int, long)
+  EXPECT_TRUE(Demangle("_Z1fIJilEE1SIXfLooLb0EeqstT_Li4EEEDpS1_",
+                       tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, SizeofPacks) {
+  char tmp[80];
+
+  // template <size_t i> struct S {};
+  //
+  // template <class... T> auto f(T... p) -> S<sizeof...(T)> { return {}; }
+  // template auto f<int, long>(int, long) -> S<2>;
+  //
+  // template <class... T> auto g(T... p) -> S<sizeof...(p)> { return {}; }
+  // template auto g<int, long>(int, long) -> S<2>;
+
+  // S<sizeof...(int, long)> f<int, long>(int, long)
+  EXPECT_TRUE(Demangle("_Z1fIJilEE1SIXsZT_EEDpT_", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+
+  // S<sizeof... (fp)> g<int, long>(int, long)
+  EXPECT_TRUE(Demangle("_Z1gIJilEE1SIXsZfp_EEDpT_", tmp, sizeof(tmp)));
+  EXPECT_STREQ("g<>()", tmp);
+}
+
+TEST(Demangle, SizeofPackInvolvingAnAliasTemplate) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <class... T> using A = char[sizeof...(T)];
+  // template <class... U> void f(const A<U..., int>&) {}
+  // template void f<int>(const A<int, int>&);
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // void f<int>(char const (&) [sizeof... (int, int)])
+  EXPECT_TRUE(Demangle("_Z1fIJiEEvRAsPDpT_iE_Kc", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, UserDefinedLiteral) {
+  char tmp[80];
+
+  // Source:
+  //
+  // unsigned long long operator""_lit(unsigned long long x) { return x; }
+  //
+  // LLVM demangling:
+  //
+  // operator"" _lit(unsigned long long)
+  EXPECT_TRUE(Demangle("_Zli4_lity", tmp, sizeof(tmp)));
+  EXPECT_STREQ("operator\"\" _lit()", tmp);
+}
+
+TEST(Demangle, Spaceship) {
+  char tmp[80];
+
+  // #include <compare>
+  //
+  // struct S { auto operator<=>(const S&) const = default; };
+  // auto (S::*f) = &S::operator<=>;  // make sure S::operator<=> is emitted
+  //
+  // template <class T> auto g(T x, T y) -> decltype(x <=> y) {
+  //   return x <=> y;
+  // }
+  // template auto g<S>(S x, S y) -> decltype(x <=> y);
+
+  // S::operator<=>(S const&) const
+  EXPECT_TRUE(Demangle("_ZNK1SssERKS_", tmp, sizeof(tmp)));
+  EXPECT_STREQ("S::operator<=>()", tmp);
+
+  // decltype(fp <=> fp0) g<S>(S, S)
+  EXPECT_TRUE(Demangle("_Z1gI1SEDTssfp_fp0_ET_S2_", tmp, sizeof(tmp)));
+  EXPECT_STREQ("g<>()", tmp);
+}
+
+TEST(Demangle, CoAwait) {
+  char tmp[80];
+
+  // ns::Awaitable::operator co_await() const
+  EXPECT_TRUE(Demangle("_ZNK2ns9AwaitableawEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("ns::Awaitable::operator co_await()", tmp);
+}
+
+TEST(Demangle, VendorExtendedExpressions) {
+  char tmp[80];
+
+  // void f<__e()>()
+  EXPECT_TRUE(Demangle("_Z1fIXu3__eEEEvv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+
+  // void f<__e(int, long)>()
+  EXPECT_TRUE(Demangle("_Z1fIXu3__eilEEEvv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, DirectListInitialization) {
+  char tmp[80];
+
+  // template <class T> decltype(T{}) f() { return T{}; }
+  // template decltype(int{}) f<int>();
+  //
+  // struct XYZ { int x, y, z; };
+  // template <class T> decltype(T{1, 2, 3}) g() { return T{1, 2, 3}; }
+  // template decltype(XYZ{1, 2, 3}) g<XYZ>();
+  //
+  // template <class T> decltype(T{.x = 1, .y = 2, .z = 3}) h() {
+  //   return T{.x = 1, .y = 2, .z = 3};
+  // }
+  // template decltype(XYZ{.x = 1, .y = 2, .z = 3}) h<XYZ>();
+  //
+  // // The following two cases require full C99 designated initializers,
+  // // not part of C++ but likely available as an extension if you ask your
+  // // compiler nicely.
+  //
+  // struct A { int a[4]; };
+  // template <class T> decltype(T{.a[2] = 42}) i() { return T{.a[2] = 42}; }
+  // template decltype(A{.a[2] = 42}) i<A>();
+  //
+  // template <class T> decltype(T{.a[1 ... 3] = 42}) j() {
+  //   return T{.a[1 ... 3] = 42};
+  // }
+  // template decltype(A{.a[1 ... 3] = 42}) j<A>();
+
+  // decltype(int{}) f<int>()
+  EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_EEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+
+  // decltype(XYZ{1, 2, 3}) g<XYZ>()
+  EXPECT_TRUE(Demangle("_Z1gI3XYZEDTtlT_Li1ELi2ELi3EEEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("g<>()", tmp);
+
+  // decltype(XYZ{.x = 1, .y = 2, .z = 3}) h<XYZ>()
+  EXPECT_TRUE(Demangle("_Z1hI3XYZEDTtlT_di1xLi1Edi1yLi2Edi1zLi3EEEv",
+                       tmp, sizeof(tmp)));
+  EXPECT_STREQ("h<>()", tmp);
+
+  // decltype(A{.a[2] = 42}) i<A>()
+  EXPECT_TRUE(Demangle("_Z1iI1AEDTtlT_di1adxLi2ELi42EEEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("i<>()", tmp);
+
+  // decltype(A{.a[1 ... 3] = 42}) j<A>()
+  EXPECT_TRUE(Demangle("_Z1jI1AEDTtlT_di1adXLi1ELi3ELi42EEEv",
+                       tmp, sizeof(tmp)));
+  EXPECT_STREQ("j<>()", tmp);
+}
+
+TEST(Demangle, SimpleInitializerLists) {
+  char tmp[80];
+
+  // Common preamble of source-code examples in this test function:
+  //
+  // #include <initializer_list>
+  //
+  // template <class T> void g(std::initializer_list<T>) {}
+
+  // Source:
+  //
+  // template <class T> auto f() -> decltype(g<T>({})) {}
+  // template auto f<int>() -> decltype(g<int>({}));
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // decltype(g<int>({})) f<int>()
+  EXPECT_TRUE(Demangle("_Z1fIiEDTcl1gIT_EilEEEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+
+  // Source:
+  //
+  // template <class T> auto f(T x) -> decltype(g({x})) {}
+  // template auto f<int>(int x) -> decltype(g({x}));
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // decltype(g({fp})) f<int>(int)
+  EXPECT_TRUE(Demangle("_Z1fIiEDTcl1gilfp_EEET_", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+
+  // Source:
+  //
+  // template <class T> auto f(T x, T y) -> decltype(g({x, y})) {}
+  // template auto f<int>(int x, int y) -> decltype(g({x, y}));
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // decltype(g({fp, fp0})) f<int>(int, int)
+  EXPECT_TRUE(Demangle("_Z1fIiEDTcl1gilfp_fp0_EEET_S1_", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, BracedListImplicitlyConstructingAClassObject) {
+  char tmp[80];
+
+  // Source:
+  //
+  // struct S { int v; };
+  // void g(S) {}
+  // template <class T> auto f(T x) -> decltype(g({.v = x})) {}
+  // template auto f<int>(int x) -> decltype(g({.v = x}));
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // decltype(g({.v = fp})) f<int>(int)
+  EXPECT_TRUE(Demangle("_Z1fIiEDTcl1gildi1vfp_EEET_", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, SimpleNewExpression) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <class T> decltype(T{*new T}) f() { return T{}; }
+  // template decltype(int{*new int}) f<int>();
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // decltype(int{*(new int)}) f<int>()
+  EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_denw_S0_EEEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, NewExpressionWithEmptyParentheses) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <class T> decltype(T{*new T()}) f() { return T{}; }
+  // template decltype(int{*new int()}) f<int>();
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // decltype(int{*(new int)}) f<int>()
+  EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_denw_S0_piEEEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, NewExpressionWithNonemptyParentheses) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <class T> decltype(T{*new T(42)}) f() { return T{}; }
+  // template decltype(int{*new int(42)}) f<int>();
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // decltype(int{*(new int(42))}) f<int>()
+  EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_denw_S0_piLi42EEEEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, PlacementNewExpression) {
+  char tmp[80];
+
+  // Source:
+  //
+  // #include <new>
+  //
+  // template <class T> auto f(T t) -> decltype(T{*new (&t) T(42)}) {
+  //   return t;
+  // }
+  // template auto f<int>(int t) -> decltype(int{*new (&t) int(42)});
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // decltype(int{*(new(&fp) int(42))}) f<int>(int)
+  EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_denwadfp__S0_piLi42EEEES0_",
+                       tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, GlobalScopeNewExpression) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <class T> decltype(T{*::new T}) f() { return T{}; }
+  // template decltype(int{*::new int}) f<int>();
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // decltype(int{*(::new int)}) f<int>()
+  EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_degsnw_S0_EEEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, NewExpressionWithEmptyBraces) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <class T> decltype(T{*new T{}}) f() { return T{}; }
+  // template decltype(int{*new int{}}) f<int>();
+  //
+  // GNU demangling:
+  //
+  // decltype (int{*(new int{})}) f<int>()
+  EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_denw_S0_ilEEEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, NewExpressionWithNonemptyBraces) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <class T> decltype(T{*new T{42}}) f() { return T{}; }
+  // template decltype(int{*new int{42}}) f<int>();
+  //
+  // GNU demangling:
+  //
+  // decltype (int{*(new int{42})}) f<int>()
+  EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_denw_S0_ilLi42EEEEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, SimpleArrayNewExpression) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <class T> decltype(T{*new T[1]}) f() { return T{}; }
+  // template decltype(int{*new int[1]}) f<int>();
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // decltype(int{*(new[] int)}) f<int>()
+  EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_dena_S0_EEEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, ArrayNewExpressionWithEmptyParentheses) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <class T> decltype(T{*new T[1]()}) f() { return T{}; }
+  // template decltype(int{*new int[1]()}) f<int>();
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // decltype(int{*(new[] int)}) f<int>()
+  EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_dena_S0_piEEEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, ArrayPlacementNewExpression) {
+  char tmp[80];
+
+  // Source:
+  //
+  // #include <new>
+  //
+  // template <class T> auto f(T t) -> decltype(T{*new (&t) T[1]}) {
+  //   return T{};
+  // }
+  // template auto f<int>(int t) -> decltype(int{*new (&t) int[1]});
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // decltype(int{*(new[](&fp) int)}) f<int>(int)
+  EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_denaadfp__S0_EEES0_", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, GlobalScopeArrayNewExpression) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <class T> decltype(T{*::new T[1]}) f() { return T{}; }
+  // template decltype(int{*::new int[1]}) f<int>();
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // decltype(int{*(::new[] int)}) f<int>()
+  EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_degsna_S0_EEEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, ArrayNewExpressionWithTwoElementsInBraces) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <class T> decltype(T{*new T[2]{1, 2}}) f() { return T{}; }
+  // template decltype(int{*new int[2]{1, 2}}) f<int>();
+  //
+  // GNU demangling:
+  //
+  // decltype (int{*(new int{1, 2})}) f<int>()
+  EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_dena_S0_ilLi1ELi2EEEEv",
+                       tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, SimpleDeleteExpression) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <class T> auto f(T* p) -> decltype(delete p) {}
+  // template auto f<int>(int* p) -> decltype(delete p);
+  //
+  // LLVM demangling:
+  //
+  // decltype(delete fp) f<int>(int*)
+  EXPECT_TRUE(Demangle("_Z1fIiEDTdlfp_EPT_", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, GlobalScopeDeleteExpression) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <class T> auto f(T* p) -> decltype(::delete p) {}
+  // template auto f<int>(int* p) -> decltype(::delete p);
+  //
+  // LLVM demangling:
+  //
+  // decltype(::delete fp) f<int>(int*)
+  EXPECT_TRUE(Demangle("_Z1fIiEDTgsdlfp_EPT_", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, SimpleArrayDeleteExpression) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <class T> auto f(T* a) -> decltype(delete[] a) {}
+  // template auto f<int>(int* a) -> decltype(delete[] a);
+  //
+  // LLVM demangling:
+  //
+  // decltype(delete[] fp) f<int>(int*)
+  EXPECT_TRUE(Demangle("_Z1fIiEDTdafp_EPT_", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, GlobalScopeArrayDeleteExpression) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <class T> auto f(T* a) -> decltype(::delete[] a) {}
+  // template auto f<int>(int* a) -> decltype(::delete[] a);
+  //
+  // LLVM demangling:
+  //
+  // decltype(::delete[] fp) f<int>(int*)
+  EXPECT_TRUE(Demangle("_Z1fIiEDTgsdafp_EPT_", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, ReferenceQualifiedFunctionTypes) {
+  char tmp[80];
+
+  // void f(void (*)() const &, int)
+  EXPECT_TRUE(Demangle("_Z1fPKFvvREi", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f()", tmp);
+
+  // void f(void (*)() &&, int)
+  EXPECT_TRUE(Demangle("_Z1fPFvvOEi", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f()", tmp);
+
+  // void f(void (*)(int&) &, int)
+  EXPECT_TRUE(Demangle("_Z1fPFvRiREi", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f()", tmp);
+
+  // void f(void (*)(S&&) &&, int)
+  EXPECT_TRUE(Demangle("_Z1fPFvO1SOEi", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f()", tmp);
+}
+
+TEST(Demangle, DynamicCast) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <class T> auto f(T* p) -> decltype(dynamic_cast<const T*>(p)) {
+  //   return p;
+  // }
+  // struct S {};
+  // void g(S* p) { f(p); }
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // decltype(dynamic_cast<S const*>(fp)) f<S>(S*)
+  EXPECT_TRUE(Demangle("_Z1fI1SEDTdcPKT_fp_EPS1_", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, StaticCast) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <class T> auto f(T* p) -> decltype(static_cast<const T*>(p)) {
+  //   return p;
+  // }
+  // void g(int* p) { f(p); }
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // decltype(static_cast<int const*>(fp)) f<int>(int*)
+  EXPECT_TRUE(Demangle("_Z1fIiEDTscPKT_fp_EPS0_", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, ConstCast) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <class T> auto f(T* p) -> decltype(const_cast<const T*>(p)) {
+  //   return p;
+  // }
+  // void g(int* p) { f(p); }
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // decltype(const_cast<int const*>(fp)) f<int>(int*)
+  EXPECT_TRUE(Demangle("_Z1fIiEDTccPKT_fp_EPS0_", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, ReinterpretCast) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <class T> auto f(T* p)
+  //     -> decltype(reinterpret_cast<const T*>(p)) {
+  //   return p;
+  // }
+  // void g(int* p) { f(p); }
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // decltype(reinterpret_cast<int const*>(fp)) f<int>(int*)
+  EXPECT_TRUE(Demangle("_Z1fIiEDTrcPKT_fp_EPS0_", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, TypeidType) {
+  char tmp[80];
+
+  // Source:
+  //
+  // #include <typeinfo>
+  //
+  // template <class T> decltype(typeid(T).name()) f(T) { return nullptr; }
+  // template decltype(typeid(int).name()) f<int>(int);
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // decltype(typeid (int).name()) f<int>(int)
+  EXPECT_TRUE(Demangle("_Z1fIiEDTcldttiT_4nameEES0_", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, TypeidExpression) {
+  char tmp[80];
+
+  // Source:
+  //
+  // #include <typeinfo>
+  //
+  // template <class T> decltype(typeid(T{}).name()) f(T) { return nullptr; }
+  // template decltype(typeid(int{}).name()) f<int>(int);
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // decltype(typeid (int{}).name()) f<int>(int)
+  EXPECT_TRUE(Demangle("_Z1fIiEDTcldttetlT_E4nameEES0_", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, AlignofType) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <class T> T f(T (&a)[alignof(T)]) { return a[0]; }
+  // template int f<int>(int (&)[alignof(int)]);
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // int f<int>(int (&) [alignof (int)])
+  EXPECT_TRUE(Demangle("_Z1fIiET_RAatS0__S0_", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, AlignofExpression) {
+  char tmp[80];
+
+  // Source (note that this uses a GNU extension; it is not standard C++):
+  //
+  // template <class T> T f(T (&a)[alignof(T{})]) { return a[0]; }
+  // template int f<int>(int (&)[alignof(int{})]);
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // int f<int>(int (&) [alignof (int{})])
+  EXPECT_TRUE(Demangle("_Z1fIiET_RAaztlS0_E_S0_", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, NoexceptExpression) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <class T> void f(T (&a)[noexcept(T{})]) {}
+  // template void f<int>(int (&)[noexcept(int{})]);
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // void f<int>(int (&) [noexcept (int{})])
+  EXPECT_TRUE(Demangle("_Z1fIiEvRAnxtlT_E_S0_", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, UnaryThrow) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <bool b> decltype(b ? throw b : 0) f() { return 0; }
+  // template decltype(false ? throw false : 0) f<false>();
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // decltype(false ? throw false : 0) f<false>()
+  EXPECT_TRUE(Demangle("_Z1fILb0EEDTquT_twT_Li0EEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, NullaryThrow) {
+  char tmp[80];
+
+  // Source:
+  //
+  // template <bool b> decltype(b ? throw : 0) f() { return 0; }
+  // template decltype(false ? throw : 0) f<false>();
+  //
+  // Full LLVM demangling of the instantiation of f:
+  //
+  // decltype(false ? throw : 0) f<false>()
+  EXPECT_TRUE(Demangle("_Z1fILb0EEDTquT_trLi0EEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+}
+
+TEST(Demangle, ThreadLocalWrappers) {
+  char tmp[80];
+
+  EXPECT_TRUE(Demangle("_ZTWN2ns3varE", tmp, sizeof(tmp)));
+  EXPECT_STREQ("thread-local wrapper routine for ns::var", tmp);
+
+  EXPECT_TRUE(Demangle("_ZTHN2ns3varE", tmp, sizeof(tmp)));
+  EXPECT_STREQ("thread-local initialization routine for ns::var", tmp);
+}
+
+TEST(Demangle, DubiousSrStSymbols) {
+  char tmp[80];
+
+  // GNU demangling (not accepted by LLVM):
+  //
+  // S<std::u<char>::v> f<char>()
+  EXPECT_TRUE(Demangle("_Z1fIcE1SIXsrSt1uIT_E1vEEv", tmp, sizeof(tmp)));
+  EXPECT_STREQ("f<>()", tmp);
+
+  // A real case from the wild.
+  //
+  // GNU demangling (not accepted by LLVM) with line breaks and indentation
+  // added for readability:
+  //
+  // __gnu_cxx::__enable_if<std::__is_char<char>::__value, bool>::__type
+  // std::operator==<char>(
+  //     std::__cxx11::basic_string<char, std::char_traits<char>,
+  //                                std::allocator<char> > const&,
+  //     std::__cxx11::basic_string<char, std::char_traits<char>,
+  //                                std::allocator<char> > const&)
+  EXPECT_TRUE(Demangle(
+      "_ZSteqIcEN9__gnu_cxx11__enable_if"
+      "IXsrSt9__is_charIT_E7__valueEbE"
+      "6__typeE"
+      "RKNSt7__cxx1112basic_stringIS3_St11char_traitsIS3_ESaIS3_EEESE_",
+      tmp, sizeof(tmp)));
+  EXPECT_STREQ("std::operator==<>()", tmp);
+}
+
+// Test one Rust symbol to exercise Demangle's delegation path.  Rust demangling
+// itself is more thoroughly tested in demangle_rust_test.cc.
+TEST(Demangle, DelegatesToDemangleRustSymbolEncoding) {
+  char tmp[80];
+
+  EXPECT_TRUE(Demangle("_RNvC8my_crate7my_func", tmp, sizeof(tmp)));
+  EXPECT_STREQ("my_crate::my_func", tmp);
+}
+
 // Tests that verify that Demangle footprint is within some limit.
 // They are not to be run under sanitizers as the sanitizers increase
 // stack consumption by about 4x.
diff --git a/absl/debugging/internal/elf_mem_image.cc b/absl/debugging/internal/elf_mem_image.cc
index 42dcd3c..2c16830 100644
--- a/absl/debugging/internal/elf_mem_image.cc
+++ b/absl/debugging/internal/elf_mem_image.cc
@@ -20,8 +20,11 @@
 #ifdef ABSL_HAVE_ELF_MEM_IMAGE  // defined in elf_mem_image.h
 
 #include <string.h>
+
 #include <cassert>
 #include <cstddef>
+#include <cstdint>
+
 #include "absl/base/config.h"
 #include "absl/base/internal/raw_logging.h"
 
@@ -86,20 +89,14 @@
   Init(base);
 }
 
-int ElfMemImage::GetNumSymbols() const {
-  if (!hash_) {
-    return 0;
-  }
-  // See http://www.caldera.com/developers/gabi/latest/ch5.dynamic.html#hash
-  return static_cast<int>(hash_[1]);
-}
+uint32_t ElfMemImage::GetNumSymbols() const { return num_syms_; }
 
-const ElfW(Sym) *ElfMemImage::GetDynsym(int index) const {
+const ElfW(Sym) * ElfMemImage::GetDynsym(uint32_t index) const {
   ABSL_RAW_CHECK(index < GetNumSymbols(), "index out of range");
   return dynsym_ + index;
 }
 
-const ElfW(Versym) *ElfMemImage::GetVersym(int index) const {
+const ElfW(Versym) *ElfMemImage::GetVersym(uint32_t index) const {
   ABSL_RAW_CHECK(index < GetNumSymbols(), "index out of range");
   return versym_ + index;
 }
@@ -154,7 +151,7 @@
   dynstr_    = nullptr;
   versym_    = nullptr;
   verdef_    = nullptr;
-  hash_      = nullptr;
+  num_syms_ = 0;
   strsize_   = 0;
   verdefnum_ = 0;
   // Sentinel: PT_LOAD .p_vaddr can't possibly be this.
@@ -219,12 +216,17 @@
       base_as_char - reinterpret_cast<const char *>(link_base_);
   ElfW(Dyn)* dynamic_entry = reinterpret_cast<ElfW(Dyn)*>(
       static_cast<intptr_t>(dynamic_program_header->p_vaddr) + relocation);
+  uint32_t *sysv_hash = nullptr;
+  uint32_t *gnu_hash = nullptr;
   for (; dynamic_entry->d_tag != DT_NULL; ++dynamic_entry) {
     const auto value =
         static_cast<intptr_t>(dynamic_entry->d_un.d_val) + relocation;
     switch (dynamic_entry->d_tag) {
       case DT_HASH:
-        hash_ = reinterpret_cast<ElfW(Word) *>(value);
+        sysv_hash = reinterpret_cast<uint32_t *>(value);
+        break;
+      case DT_GNU_HASH:
+        gnu_hash = reinterpret_cast<uint32_t *>(value);
         break;
       case DT_SYMTAB:
         dynsym_ = reinterpret_cast<ElfW(Sym) *>(value);
@@ -249,13 +251,38 @@
         break;
     }
   }
-  if (!hash_ || !dynsym_ || !dynstr_ || !versym_ ||
+  if ((!sysv_hash && !gnu_hash) || !dynsym_ || !dynstr_ || !versym_ ||
       !verdef_ || !verdefnum_ || !strsize_) {
     assert(false);  // invalid VDSO
     // Mark this image as not present. Can not recur infinitely.
     Init(nullptr);
     return;
   }
+  if (sysv_hash) {
+    num_syms_ = sysv_hash[1];
+  } else {
+    assert(gnu_hash);
+    // Compute the number of symbols for DT_GNU_HASH, which is specified by
+    // https://sourceware.org/gnu-gabi/program-loading-and-dynamic-linking.txt
+    uint32_t nbuckets = gnu_hash[0];
+    // The buckets array is located after the header (4 uint32) and the bloom
+    // filter (size_t array of gnu_hash[2] elements).
+    uint32_t *buckets = gnu_hash + 4 + sizeof(size_t) / 4 * gnu_hash[2];
+    // Find the chain of the last non-empty bucket.
+    uint32_t idx = 0;
+    for (uint32_t i = nbuckets; i > 0;) {
+      idx = buckets[--i];
+      if (idx != 0) break;
+    }
+    if (idx != 0) {
+      // Find the last element of the chain, which has an odd value.
+      // Add one to get the number of symbols.
+      uint32_t *chain = buckets + nbuckets - gnu_hash[1];
+      while (chain[idx++] % 2 == 0) {
+      }
+    }
+    num_syms_ = idx;
+  }
 }
 
 bool ElfMemImage::LookupSymbol(const char *name,
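The DT_GNU_HASH handling added in the hunk above derives the dynamic-symbol count from the hash table itself. The standalone sketch below mirrors that same computation for readers unfamiliar with the table layout; the free function CountDynsymsFromGnuHash is illustrative only and is not part of this change or of Abseil.

#include <cstddef>
#include <cstdint>

// Illustrative sketch: count dynamic symbols from a raw DT_GNU_HASH table.
// Table layout: a header of four uint32_t values
//   [nbuckets, symoffset, bloom_size, bloom_shift],
// then bloom_size machine words of bloom filter, then nbuckets bucket
// entries, then the chain array.
uint32_t CountDynsymsFromGnuHash(const uint32_t* gnu_hash) {
  const uint32_t nbuckets = gnu_hash[0];
  const uint32_t symoffset = gnu_hash[1];
  const uint32_t bloom_size = gnu_hash[2];
  // Buckets follow the 4-word header and the bloom filter.
  const uint32_t* buckets = gnu_hash + 4 + sizeof(size_t) / 4 * bloom_size;
  // Each bucket holds the index of the first symbol of its chain (0 if the
  // bucket is empty), so the highest such index points into the last chain.
  uint32_t idx = 0;
  for (uint32_t i = nbuckets; i > 0;) {
    idx = buckets[--i];
    if (idx != 0) break;
  }
  if (idx != 0) {
    // Chain entries start at symbol index symoffset; the final entry of each
    // chain has its low bit set.  Walk to the end of the last chain and add
    // one to get the total number of dynamic symbols.
    const uint32_t* chain = buckets + nbuckets - symoffset;
    while (chain[idx++] % 2 == 0) {
    }
  }
  return idx;  // 0 if every bucket was empty.
}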
@@ -300,9 +327,9 @@
   return false;
 }
 
-ElfMemImage::SymbolIterator::SymbolIterator(const void *const image, int index)
-    : index_(index), image_(image) {
-}
+ElfMemImage::SymbolIterator::SymbolIterator(const void *const image,
+                                            uint32_t index)
+    : index_(index), image_(image) {}
 
 const ElfMemImage::SymbolInfo *ElfMemImage::SymbolIterator::operator->() const {
   return &info_;
@@ -335,7 +362,7 @@
   return SymbolIterator(this, GetNumSymbols());
 }
 
-void ElfMemImage::SymbolIterator::Update(int increment) {
+void ElfMemImage::SymbolIterator::Update(uint32_t increment) {
   const ElfMemImage *image = reinterpret_cast<const ElfMemImage *>(image_);
   ABSL_RAW_CHECK(image->IsPresent() || increment == 0, "");
   if (!image->IsPresent()) {
diff --git a/absl/debugging/internal/elf_mem_image.h b/absl/debugging/internal/elf_mem_image.h
index e7fe6ab..19c4952 100644
--- a/absl/debugging/internal/elf_mem_image.h
+++ b/absl/debugging/internal/elf_mem_image.h
@@ -22,6 +22,7 @@
 // Including this will define the __GLIBC__ macro if glibc is being
 // used.
 #include <climits>
+#include <cstdint>
 
 #include "absl/base/config.h"
 
@@ -82,10 +83,10 @@
     bool operator!=(const SymbolIterator &rhs) const;
     bool operator==(const SymbolIterator &rhs) const;
    private:
-    SymbolIterator(const void *const image, int index);
-    void Update(int incr);
+    SymbolIterator(const void *const image, uint32_t index);
+    void Update(uint32_t incr);
     SymbolInfo info_;
-    int index_;
+    uint32_t index_;
     const void *const image_;
   };
 
@@ -94,14 +95,14 @@
   void                 Init(const void *base);
   bool                 IsPresent() const { return ehdr_ != nullptr; }
   const ElfW(Phdr)*    GetPhdr(int index) const;
-  const ElfW(Sym)*     GetDynsym(int index) const;
-  const ElfW(Versym)*  GetVersym(int index) const;
+  const ElfW(Sym) * GetDynsym(uint32_t index) const;
+  const ElfW(Versym)*  GetVersym(uint32_t index) const;
   const ElfW(Verdef)*  GetVerdef(int index) const;
   const ElfW(Verdaux)* GetVerdefAux(const ElfW(Verdef) *verdef) const;
   const char*          GetDynstr(ElfW(Word) offset) const;
   const void*          GetSymAddr(const ElfW(Sym) *sym) const;
   const char*          GetVerstr(ElfW(Word) offset) const;
-  int                  GetNumSymbols() const;
+  uint32_t GetNumSymbols() const;
 
   SymbolIterator begin() const;
   SymbolIterator end() const;
@@ -124,8 +125,8 @@
   const ElfW(Sym) *dynsym_;
   const ElfW(Versym) *versym_;
   const ElfW(Verdef) *verdef_;
-  const ElfW(Word) *hash_;
   const char *dynstr_;
+  uint32_t num_syms_;
   size_t strsize_;
   size_t verdefnum_;
   ElfW(Addr) link_base_;     // Link-time base (p_vaddr of first PT_LOAD).
diff --git a/absl/debugging/internal/stacktrace_aarch64-inl.inc b/absl/debugging/internal/stacktrace_aarch64-inl.inc
index 1caf7bb..b123479 100644
--- a/absl/debugging/internal/stacktrace_aarch64-inl.inc
+++ b/absl/debugging/internal/stacktrace_aarch64-inl.inc
@@ -89,6 +89,8 @@
 
 static bool InsideSignalStack(void** ptr, const StackInfo* stack_info) {
   uintptr_t comparable_ptr = reinterpret_cast<uintptr_t>(ptr);
+  if (stack_info->sig_stack_high == kUnknownStackEnd)
+    return false;
   return (comparable_ptr >= stack_info->sig_stack_low &&
           comparable_ptr < stack_info->sig_stack_high);
 }
@@ -122,13 +124,7 @@
       if (pre_signal_frame_pointer >= old_frame_pointer) {
         new_frame_pointer = pre_signal_frame_pointer;
       }
-      // Check that alleged frame pointer is actually readable. This is to
-      // prevent "double fault" in case we hit the first fault due to e.g.
-      // stack corruption.
-      if (!absl::debugging_internal::AddressIsReadable(
-              new_frame_pointer))
-        return nullptr;
     }
   }
 #endif
 
@@ -136,6 +132,13 @@
   if ((reinterpret_cast<uintptr_t>(new_frame_pointer) & 7) != 0)
     return nullptr;
 
+  // Check that alleged frame pointer is actually readable. This is to
+  // prevent "double fault" in case we hit the first fault due to e.g.
+  // stack corruption.
+  if (!absl::debugging_internal::AddressIsReadable(
+          new_frame_pointer))
+    return nullptr;
+
   // Only check the size if both frames are in the same stack.
   if (InsideSignalStack(new_frame_pointer, stack_info) ==
       InsideSignalStack(old_frame_pointer, stack_info)) {
diff --git a/absl/debugging/internal/utf8_for_code_point.cc b/absl/debugging/internal/utf8_for_code_point.cc
new file mode 100644
index 0000000..658a3b5
--- /dev/null
+++ b/absl/debugging/internal/utf8_for_code_point.cc
@@ -0,0 +1,70 @@
+// Copyright 2024 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/debugging/internal/utf8_for_code_point.h"
+
+#include <cstdint>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+namespace {
+
+// UTF-8 encoding bounds.
+constexpr uint32_t kMinSurrogate = 0xd800, kMaxSurrogate = 0xdfff;
+constexpr uint32_t kMax1ByteCodePoint = 0x7f;
+constexpr uint32_t kMax2ByteCodePoint = 0x7ff;
+constexpr uint32_t kMax3ByteCodePoint = 0xffff;
+constexpr uint32_t kMaxCodePoint = 0x10ffff;
+
+}  // namespace
+
+Utf8ForCodePoint::Utf8ForCodePoint(uint64_t code_point) {
+  if (code_point <= kMax1ByteCodePoint) {
+    length = 1;
+    bytes[0] = static_cast<char>(code_point);
+    return;
+  }
+
+  if (code_point <= kMax2ByteCodePoint) {
+    length = 2;
+    bytes[0] = static_cast<char>(0xc0 | (code_point >> 6));
+    bytes[1] = static_cast<char>(0x80 | (code_point & 0x3f));
+    return;
+  }
+
+  if (kMinSurrogate <= code_point && code_point <= kMaxSurrogate) return;
+
+  if (code_point <= kMax3ByteCodePoint) {
+    length = 3;
+    bytes[0] = static_cast<char>(0xe0 | (code_point >> 12));
+    bytes[1] = static_cast<char>(0x80 | ((code_point >> 6) & 0x3f));
+    bytes[2] = static_cast<char>(0x80 | (code_point & 0x3f));
+    return;
+  }
+
+  if (code_point > kMaxCodePoint) return;
+
+  length = 4;
+  bytes[0] = static_cast<char>(0xf0 | (code_point >> 18));
+  bytes[1] = static_cast<char>(0x80 | ((code_point >> 12) & 0x3f));
+  bytes[2] = static_cast<char>(0x80 | ((code_point >> 6) & 0x3f));
+  bytes[3] = static_cast<char>(0x80 | (code_point & 0x3f));
+}
+
+}  // namespace debugging_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
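As a hand-worked example of the two-byte branch above (computed here, not taken from the source): for code_point 0xf1, U+00F1 LATIN SMALL LETTER N WITH TILDE, bytes[0] = 0xc0 | (0xf1 >> 6) = 0xc0 | 0x03 = 0xc3 and bytes[1] = 0x80 | (0xf1 & 0x3f) = 0x80 | 0x31 = 0xb1, giving the UTF-8 sequence C3 B1; the RecognizesSmallNWithTilde test below checks exactly this pair against the literal "ñ".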
diff --git a/absl/debugging/internal/utf8_for_code_point.h b/absl/debugging/internal/utf8_for_code_point.h
new file mode 100644
index 0000000..f23cde6
--- /dev/null
+++ b/absl/debugging/internal/utf8_for_code_point.h
@@ -0,0 +1,47 @@
+// Copyright 2024 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_DEBUGGING_INTERNAL_UTF8_FOR_CODE_POINT_H_
+#define ABSL_DEBUGGING_INTERNAL_UTF8_FOR_CODE_POINT_H_
+
+#include <cstdint>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+
+struct Utf8ForCodePoint {
+  // Converts a Unicode code point to the corresponding UTF-8 byte sequence.
+  // Async-signal-safe to support use in symbolizing stack traces from a signal
+  // handler.
+  explicit Utf8ForCodePoint(uint64_t code_point);
+
+  // Returns true if the constructor's code_point argument was valid.
+  bool ok() const { return length != 0; }
+
+  // If code_point was in range, then 1 <= length <= 4, and the UTF-8 encoding
+  // is found in bytes[0 .. (length - 1)].  If code_point was invalid, then
+  // length == 0.  In either case, the contents of bytes[length .. 3] are
+  // unspecified.
+  char bytes[4] = {};
+  uint32_t length = 0;
+};
+
+}  // namespace debugging_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_DEBUGGING_INTERNAL_UTF8_FOR_CODE_POINT_H_
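A minimal usage sketch of the interface declared above, assuming a caller that is permitted to include this internal header; the helper EncodeOrEmpty is hypothetical and only illustrates the ok()/bytes/length contract:

#include <cstdint>
#include <string>

#include "absl/debugging/internal/utf8_for_code_point.h"

// Hypothetical helper (illustration only): returns the UTF-8 encoding of
// code_point, or an empty string when the code point is invalid.
std::string EncodeOrEmpty(uint64_t code_point) {
  absl::debugging_internal::Utf8ForCodePoint utf8(code_point);
  if (!utf8.ok()) return std::string();
  return std::string(utf8.bytes, utf8.length);
}

// EncodeOrEmpty(0x4e2d) yields "\xe4\xb8\xad" ("中"), while
// EncodeOrEmpty(0xd800) yields "" because surrogate code points are rejected.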
diff --git a/absl/debugging/internal/utf8_for_code_point_test.cc b/absl/debugging/internal/utf8_for_code_point_test.cc
new file mode 100644
index 0000000..dd0591a
--- /dev/null
+++ b/absl/debugging/internal/utf8_for_code_point_test.cc
@@ -0,0 +1,175 @@
+// Copyright 2024 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/debugging/internal/utf8_for_code_point.h"
+
+#include <cstdint>
+
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+namespace {
+
+TEST(Utf8ForCodePointTest, RecognizesTheSmallestCodePoint) {
+  Utf8ForCodePoint utf8(uint64_t{0});
+  ASSERT_EQ(utf8.length, 1);
+  EXPECT_EQ(utf8.bytes[0], '\0');
+}
+
+TEST(Utf8ForCodePointTest, RecognizesAsciiSmallA) {
+  Utf8ForCodePoint utf8(uint64_t{'a'});
+  ASSERT_EQ(utf8.length, 1);
+  EXPECT_EQ(utf8.bytes[0], 'a');
+}
+
+TEST(Utf8ForCodePointTest, RecognizesTheLargestOneByteCodePoint) {
+  Utf8ForCodePoint utf8(uint64_t{0x7f});
+  ASSERT_EQ(utf8.length, 1);
+  EXPECT_EQ(utf8.bytes[0], '\x7f');
+}
+
+TEST(Utf8ForCodePointTest, RecognizesTheSmallestTwoByteCodePoint) {
+  Utf8ForCodePoint utf8(uint64_t{0x80});
+  ASSERT_EQ(utf8.length, 2);
+  EXPECT_EQ(utf8.bytes[0], static_cast<char>(0xc2));
+  EXPECT_EQ(utf8.bytes[1], static_cast<char>(0x80));
+}
+
+TEST(Utf8ForCodePointTest, RecognizesSmallNWithTilde) {
+  Utf8ForCodePoint utf8(uint64_t{0xf1});
+  ASSERT_EQ(utf8.length, 2);
+  const char* want = "ñ";
+  EXPECT_EQ(utf8.bytes[0], want[0]);
+  EXPECT_EQ(utf8.bytes[1], want[1]);
+}
+
+TEST(Utf8ForCodePointTest, RecognizesCapitalPi) {
+  Utf8ForCodePoint utf8(uint64_t{0x3a0});
+  ASSERT_EQ(utf8.length, 2);
+  const char* want = "Π";
+  EXPECT_EQ(utf8.bytes[0], want[0]);
+  EXPECT_EQ(utf8.bytes[1], want[1]);
+}
+
+TEST(Utf8ForCodePointTest, RecognizesTheLargestTwoByteCodePoint) {
+  Utf8ForCodePoint utf8(uint64_t{0x7ff});
+  ASSERT_EQ(utf8.length, 2);
+  EXPECT_EQ(utf8.bytes[0], static_cast<char>(0xdf));
+  EXPECT_EQ(utf8.bytes[1], static_cast<char>(0xbf));
+}
+
+TEST(Utf8ForCodePointTest, RecognizesTheSmallestThreeByteCodePoint) {
+  Utf8ForCodePoint utf8(uint64_t{0x800});
+  ASSERT_EQ(utf8.length, 3);
+  EXPECT_EQ(utf8.bytes[0], static_cast<char>(0xe0));
+  EXPECT_EQ(utf8.bytes[1], static_cast<char>(0xa0));
+  EXPECT_EQ(utf8.bytes[2], static_cast<char>(0x80));
+}
+
+TEST(Utf8ForCodePointTest, RecognizesTheChineseCharacterZhong1AsInZhong1Wen2) {
+  Utf8ForCodePoint utf8(uint64_t{0x4e2d});
+  ASSERT_EQ(utf8.length, 3);
+  const char* want = "中";
+  EXPECT_EQ(utf8.bytes[0], want[0]);
+  EXPECT_EQ(utf8.bytes[1], want[1]);
+  EXPECT_EQ(utf8.bytes[2], want[2]);
+}
+
+TEST(Utf8ForCodePointTest, RecognizesOneBeforeTheSmallestSurrogate) {
+  Utf8ForCodePoint utf8(uint64_t{0xd7ff});
+  ASSERT_EQ(utf8.length, 3);
+  EXPECT_EQ(utf8.bytes[0], static_cast<char>(0xed));
+  EXPECT_EQ(utf8.bytes[1], static_cast<char>(0x9f));
+  EXPECT_EQ(utf8.bytes[2], static_cast<char>(0xbf));
+}
+
+TEST(Utf8ForCodePointTest, RejectsTheSmallestSurrogate) {
+  Utf8ForCodePoint utf8(uint64_t{0xd800});
+  EXPECT_EQ(utf8.length, 0);
+}
+
+TEST(Utf8ForCodePointTest, RejectsTheLargestSurrogate) {
+  Utf8ForCodePoint utf8(uint64_t{0xdfff});
+  EXPECT_EQ(utf8.length, 0);
+}
+
+TEST(Utf8ForCodePointTest, RecognizesOnePastTheLargestSurrogate) {
+  Utf8ForCodePoint utf8(uint64_t{0xe000});
+  ASSERT_EQ(utf8.length, 3);
+  EXPECT_EQ(utf8.bytes[0], static_cast<char>(0xee));
+  EXPECT_EQ(utf8.bytes[1], static_cast<char>(0x80));
+  EXPECT_EQ(utf8.bytes[2], static_cast<char>(0x80));
+}
+
+TEST(Utf8ForCodePointTest, RecognizesTheLargestThreeByteCodePoint) {
+  Utf8ForCodePoint utf8(uint64_t{0xffff});
+  ASSERT_EQ(utf8.length, 3);
+  EXPECT_EQ(utf8.bytes[0], static_cast<char>(0xef));
+  EXPECT_EQ(utf8.bytes[1], static_cast<char>(0xbf));
+  EXPECT_EQ(utf8.bytes[2], static_cast<char>(0xbf));
+}
+
+TEST(Utf8ForCodePointTest, RecognizesTheSmallestFourByteCodePoint) {
+  Utf8ForCodePoint utf8(uint64_t{0x10000});
+  ASSERT_EQ(utf8.length, 4);
+  EXPECT_EQ(utf8.bytes[0], static_cast<char>(0xf0));
+  EXPECT_EQ(utf8.bytes[1], static_cast<char>(0x90));
+  EXPECT_EQ(utf8.bytes[2], static_cast<char>(0x80));
+  EXPECT_EQ(utf8.bytes[3], static_cast<char>(0x80));
+}
+
+TEST(Utf8ForCodePointTest, RecognizesTheJackOfHearts) {
+  Utf8ForCodePoint utf8(uint64_t{0x1f0bb});
+  ASSERT_EQ(utf8.length, 4);
+  const char* want = "🂻";
+  EXPECT_EQ(utf8.bytes[0], want[0]);
+  EXPECT_EQ(utf8.bytes[1], want[1]);
+  EXPECT_EQ(utf8.bytes[2], want[2]);
+  EXPECT_EQ(utf8.bytes[3], want[3]);
+}
+
+TEST(Utf8ForCodePointTest, RecognizesTheLargestFourByteCodePoint) {
+  Utf8ForCodePoint utf8(uint64_t{0x10ffff});
+  ASSERT_EQ(utf8.length, 4);
+  EXPECT_EQ(utf8.bytes[0], static_cast<char>(0xf4));
+  EXPECT_EQ(utf8.bytes[1], static_cast<char>(0x8f));
+  EXPECT_EQ(utf8.bytes[2], static_cast<char>(0xbf));
+  EXPECT_EQ(utf8.bytes[3], static_cast<char>(0xbf));
+}
+
+TEST(Utf8ForCodePointTest, RejectsTheSmallestOverlargeCodePoint) {
+  Utf8ForCodePoint utf8(uint64_t{0x110000});
+  EXPECT_EQ(utf8.length, 0);
+}
+
+TEST(Utf8ForCodePointTest, RejectsAThroughlyOverlargeCodePoint) {
+  Utf8ForCodePoint utf8(uint64_t{0xffffffff00000000});
+  EXPECT_EQ(utf8.length, 0);
+}
+
+TEST(Utf8ForCodePointTest, OkReturnsTrueForAValidCodePoint) {
+  EXPECT_TRUE(Utf8ForCodePoint(uint64_t{0}).ok());
+}
+
+TEST(Utf8ForCodePointTest, OkReturnsFalseForAnInvalidCodePoint) {
+  EXPECT_FALSE(Utf8ForCodePoint(uint64_t{0xffffffff00000000}).ok());
+}
+
+}  // namespace
+}  // namespace debugging_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/absl/flags/BUILD.bazel b/absl/flags/BUILD.bazel
index d3b0622..7a8ec7e 100644
--- a/absl/flags/BUILD.bazel
+++ b/absl/flags/BUILD.bazel
@@ -236,10 +236,10 @@
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
+        ":commandlineflag",
         ":config",
         ":flag_internal",
         ":reflection",
-        "//absl/base",
         "//absl/base:config",
         "//absl/base:core_headers",
         "//absl/strings",
@@ -343,7 +343,6 @@
     ],
     deps = [
         ":commandlineflag",
-        ":commandlineflag_internal",
         ":config",
         ":flag",
         ":private_handle_accessor",
@@ -391,12 +390,15 @@
         ":flag",
         ":flag_internal",
         ":marshalling",
+        ":parse",
         ":reflection",
         "//absl/base:core_headers",
         "//absl/base:malloc_internal",
+        "//absl/base:raw_logging_internal",
         "//absl/numeric:int128",
         "//absl/strings",
         "//absl/time",
+        "//absl/types:optional",
         "@com_google_googletest//:gtest",
         "@com_google_googletest//:gtest_main",
     ],
@@ -404,7 +406,7 @@
 
 cc_binary(
     name = "flag_benchmark",
-    testonly = 1,
+    testonly = True,
     srcs = [
         "flag_benchmark.cc",
     ],
@@ -459,6 +461,7 @@
         "no_test_wasm",
     ],
     deps = [
+        ":config",
         ":flag",
         ":parse",
         ":reflection",
@@ -520,11 +523,9 @@
         "no_test_wasm",
     ],
     deps = [
-        ":commandlineflag_internal",
+        ":config",
         ":flag",
-        ":marshalling",
         ":reflection",
-        ":usage_internal",
         "//absl/memory",
         "//absl/strings",
         "@com_google_googletest//:gtest",
diff --git a/absl/flags/CMakeLists.txt b/absl/flags/CMakeLists.txt
index 4495312..7376d11 100644
--- a/absl/flags/CMakeLists.txt
+++ b/absl/flags/CMakeLists.txt
@@ -214,7 +214,6 @@
     absl::flags_config
     absl::flags_internal
     absl::flags_reflection
-    absl::base
     absl::core_headers
     absl::strings
 )
@@ -307,7 +306,6 @@
   DEPS
     absl::flags
     absl::flags_commandlineflag
-    absl::flags_commandlineflag_internal
     absl::flags_config
     absl::flags_private_handle_accessor
     absl::flags_reflection
@@ -342,8 +340,11 @@
     absl::flags_config
     absl::flags_internal
     absl::flags_marshalling
+    absl::flags_parse
     absl::flags_reflection
     absl::int128
+    absl::optional
+    absl::raw_logging_internal
     absl::strings
     absl::time
     GTest::gtest_main
@@ -370,6 +371,7 @@
     ${ABSL_TEST_COPTS}
   DEPS
     absl::flags
+    absl::flags_config
     absl::flags_parse
     absl::flags_reflection
     absl::flags_usage_internal
@@ -413,8 +415,8 @@
   COPTS
     ${ABSL_TEST_COPTS}
   DEPS
-    absl::flags_commandlineflag_internal
     absl::flags
+    absl::flags_config
     absl::flags_reflection
     absl::flags_usage
     absl::memory
diff --git a/absl/flags/commandlineflag.h b/absl/flags/commandlineflag.h
index c30aa60..26ec0e7 100644
--- a/absl/flags/commandlineflag.h
+++ b/absl/flags/commandlineflag.h
@@ -59,6 +59,14 @@
 //   // Now you can get flag info from that reflection handle.
 //   std::string flag_location = my_flag_data->Filename();
 //   ...
+
+// These are only used as constexpr global objects.
+// They do not use a virtual destructor to simplify their implementation.
+// They are not destroyed except at program exit, so leaks do not matter.
+#if defined(__GNUC__) && !defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wnon-virtual-dtor"
+#endif
 class CommandLineFlag {
  public:
   constexpr CommandLineFlag() = default;
@@ -193,6 +201,9 @@
   // flag's value type.
   virtual void CheckDefaultValueParsingRoundtrip() const = 0;
 };
+#if defined(__GNUC__) && !defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
 
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/absl/flags/commandlineflag_test.cc b/absl/flags/commandlineflag_test.cc
index 585db4b..54700cf 100644
--- a/absl/flags/commandlineflag_test.cc
+++ b/absl/flags/commandlineflag_test.cc
@@ -19,8 +19,8 @@
 #include <string>
 
 #include "gtest/gtest.h"
+#include "absl/flags/config.h"
 #include "absl/flags/flag.h"
-#include "absl/flags/internal/commandlineflag.h"
 #include "absl/flags/internal/private_handle_accessor.h"
 #include "absl/flags/reflection.h"
 #include "absl/flags/usage_config.h"
@@ -51,7 +51,12 @@
     absl::SetFlagsUsageConfig(default_config);
   }
 
-  void SetUp() override { flag_saver_ = absl::make_unique<absl::FlagSaver>(); }
+  void SetUp() override {
+#if ABSL_FLAGS_STRIP_NAMES
+    GTEST_SKIP() << "This test requires flag names to be present";
+#endif
+    flag_saver_ = absl::make_unique<absl::FlagSaver>();
+  }
   void TearDown() override { flag_saver_.reset(); }
 
  private:
diff --git a/absl/flags/flag.h b/absl/flags/flag.h
index 06ea693..a8e0e93 100644
--- a/absl/flags/flag.h
+++ b/absl/flags/flag.h
@@ -29,12 +29,14 @@
 #ifndef ABSL_FLAGS_FLAG_H_
 #define ABSL_FLAGS_FLAG_H_
 
+#include <cstdint>
 #include <string>
 #include <type_traits>
 
 #include "absl/base/attributes.h"
 #include "absl/base/config.h"
 #include "absl/base/optimization.h"
+#include "absl/flags/commandlineflag.h"
 #include "absl/flags/config.h"
 #include "absl/flags/internal/flag.h"
 #include "absl/flags/internal/registry.h"
diff --git a/absl/flags/flag_benchmark.cc b/absl/flags/flag_benchmark.cc
index 758a6a5..88cc0c5 100644
--- a/absl/flags/flag_benchmark.cc
+++ b/absl/flags/flag_benchmark.cc
@@ -143,7 +143,7 @@
 #pragma clang section data = ".benchmark_flags"
 #endif
 #define DEFINE_FLAG(T, name, index) ABSL_FLAG(T, name##_##index, {}, "");
-#define FLAG_DEF(T) REPLICATE(DEFINE_FLAG, T, T##_flag);
+#define FLAG_DEF(T) REPLICATE(DEFINE_FLAG, T, T##_flag)
 BENCHMARKED_TYPES(FLAG_DEF)
 #if defined(__clang__) && defined(__linux__)
 #pragma clang section data = ""
diff --git a/absl/flags/flag_test.cc b/absl/flags/flag_test.cc
index 8d14ba8..8af5bf7 100644
--- a/absl/flags/flag_test.cc
+++ b/absl/flags/flag_test.cc
@@ -19,19 +19,19 @@
 #include <stdint.h>
 
 #include <atomic>
-#include <cmath>
-#include <new>
 #include <string>
 #include <thread>  // NOLINT
 #include <vector>
 
 #include "gtest/gtest.h"
 #include "absl/base/attributes.h"
+#include "absl/base/internal/raw_logging.h"
 #include "absl/base/macros.h"
 #include "absl/flags/config.h"
 #include "absl/flags/declare.h"
 #include "absl/flags/internal/flag.h"
 #include "absl/flags/marshalling.h"
+#include "absl/flags/parse.h"
 #include "absl/flags/reflection.h"
 #include "absl/flags/usage_config.h"
 #include "absl/numeric/int128.h"
@@ -40,7 +40,9 @@
 #include "absl/strings/str_cat.h"
 #include "absl/strings/str_split.h"
 #include "absl/strings/string_view.h"
+#include "absl/time/clock.h"
 #include "absl/time/time.h"
+#include "absl/types/optional.h"
 
 ABSL_DECLARE_FLAG(int64_t, mistyped_int_flag);
 ABSL_DECLARE_FLAG(std::vector<std::string>, mistyped_string_flag);
@@ -125,9 +127,9 @@
 #endif
 
   EXPECT_EQ(flags::StorageKind<std::string>(),
-            flags::FlagValueStorageKind::kAlignedBuffer);
+            flags::FlagValueStorageKind::kHeapAllocated);
   EXPECT_EQ(flags::StorageKind<std::vector<std::string>>(),
-            flags::FlagValueStorageKind::kAlignedBuffer);
+            flags::FlagValueStorageKind::kHeapAllocated);
 
   EXPECT_EQ(flags::StorageKind<absl::int128>(),
             flags::FlagValueStorageKind::kSequenceLocked);
@@ -226,9 +228,10 @@
 
 namespace {
 
-#if !ABSL_FLAGS_STRIP_NAMES
-
 TEST_F(FlagTest, TestFlagDeclaration) {
+#if ABSL_FLAGS_STRIP_NAMES
+  GTEST_SKIP() << "This test requires flag names to be present";
+#endif
   // test that we can access flag objects.
   EXPECT_EQ(absl::GetFlagReflectionHandle(FLAGS_test_flag_01).Name(),
             "test_flag_01");
@@ -259,12 +262,27 @@
   EXPECT_EQ(absl::GetFlagReflectionHandle(FLAGS_test_flag_14).Name(),
             "test_flag_14");
 }
-#endif  // !ABSL_FLAGS_STRIP_NAMES
-
-// --------------------------------------------------------------------
 
 }  // namespace
 
+#if ABSL_FLAGS_STRIP_NAMES
+// The intent of this helper struct and the expression below is to make sure
+// that, in the configuration where ABSL_FLAGS_STRIP_NAMES=1, registrar
+// construction (when there are no trailing calls such as OnUpdate) is
+// constexpr and thus can and should be completely optimized away, avoiding
+// the cost/overhead of static initializers.
+struct VerifyConsteval {
+  friend consteval flags::FlagRegistrarEmpty operator+(
+      flags::FlagRegistrarEmpty, VerifyConsteval) {
+    return {};
+  }
+};
+
+ABSL_FLAG(int, test_registrar_const_init, 0, "") + VerifyConsteval();
+#endif
+
+// --------------------------------------------------------------------
+
 ABSL_FLAG(bool, test_flag_01, true, "test flag 01");
 ABSL_FLAG(int, test_flag_02, 1234, "test flag 02");
 ABSL_FLAG(int16_t, test_flag_03, -34, "test flag 03");
@@ -283,8 +301,10 @@
 
 namespace {
 
-#if !ABSL_FLAGS_STRIP_NAMES
 TEST_F(FlagTest, TestFlagDefinition) {
+#if ABSL_FLAGS_STRIP_NAMES
+  GTEST_SKIP() << "This test requires flag names to be present";
+#endif
   absl::string_view expected_file_name = "absl/flags/flag_test.cc";
 
   EXPECT_EQ(absl::GetFlagReflectionHandle(FLAGS_test_flag_01).Name(),
@@ -413,7 +433,6 @@
       expected_file_name))
       << absl::GetFlagReflectionHandle(FLAGS_test_flag_14).Filename();
 }
-#endif  // !ABSL_FLAGS_STRIP_NAMES
 
 // --------------------------------------------------------------------
 
@@ -497,8 +516,10 @@
 
 struct NonTriviallyCopyableAggregate {
   NonTriviallyCopyableAggregate() = default;
+  // NOLINTNEXTLINE
   NonTriviallyCopyableAggregate(const NonTriviallyCopyableAggregate& rhs)
       : value(rhs.value) {}
+  // NOLINTNEXTLINE
   NonTriviallyCopyableAggregate& operator=(
       const NonTriviallyCopyableAggregate& rhs) {
     value = rhs.value;
@@ -604,6 +625,9 @@
 // --------------------------------------------------------------------
 
 TEST_F(FlagTest, TestGetViaReflection) {
+#if ABSL_FLAGS_STRIP_NAMES
+  GTEST_SKIP() << "This test requires flag names to be present";
+#endif
   auto* handle = absl::FindCommandLineFlag("test_flag_01");
   EXPECT_EQ(*handle->TryGet<bool>(), true);
   handle = absl::FindCommandLineFlag("test_flag_02");
@@ -638,6 +662,9 @@
 // --------------------------------------------------------------------
 
 TEST_F(FlagTest, ConcurrentSetAndGet) {
+#if ABSL_FLAGS_STRIP_NAMES
+  GTEST_SKIP() << "This test requires flag names to be present";
+#endif
   static constexpr int kNumThreads = 8;
   // Two arbitrary durations. One thread will concurrently flip the flag
   // between these two values, while the other threads read it and verify
@@ -785,10 +812,12 @@
 // MSVC produces link error on the type mismatch.
 // Linux does not have build errors and validations work as expected.
 #if !defined(_WIN32) && GTEST_HAS_DEATH_TEST
-
 using FlagDeathTest = FlagTest;
 
 TEST_F(FlagDeathTest, TestTypeMismatchValidations) {
+#if ABSL_FLAGS_STRIP_NAMES
+  GTEST_SKIP() << "This test requires flag names to be present";
+#endif
 #if !defined(NDEBUG)
   EXPECT_DEATH_IF_SUPPORTED(
       static_cast<void>(absl::GetFlag(FLAGS_mistyped_int_flag)),
@@ -949,31 +978,15 @@
 }
 std::string AbslUnparseFlag(const SmallAlignUDT&) { return ""; }
 
-// User-defined type with small size, but not trivially copyable.
-struct NonTriviallyCopyableUDT {
-  NonTriviallyCopyableUDT() : c('A') {}
-  NonTriviallyCopyableUDT(const NonTriviallyCopyableUDT& rhs) : c(rhs.c) {}
-  NonTriviallyCopyableUDT& operator=(const NonTriviallyCopyableUDT& rhs) {
-    c = rhs.c;
-    return *this;
-  }
-
-  char c;
-};
-
-bool AbslParseFlag(absl::string_view, NonTriviallyCopyableUDT*, std::string*) {
-  return true;
-}
-std::string AbslUnparseFlag(const NonTriviallyCopyableUDT&) { return ""; }
-
 }  // namespace
 
 ABSL_FLAG(SmallAlignUDT, test_flag_sa_udt, {}, "help");
-ABSL_FLAG(NonTriviallyCopyableUDT, test_flag_ntc_udt, {}, "help");
 
 namespace {
 
 TEST_F(FlagTest, TestSmallAlignUDT) {
+  EXPECT_EQ(flags::StorageKind<SmallAlignUDT>(),
+            flags::FlagValueStorageKind::kSequenceLocked);
   SmallAlignUDT value = absl::GetFlag(FLAGS_test_flag_sa_udt);
   EXPECT_EQ(value.c, 'A');
   EXPECT_EQ(value.s, 12);
@@ -985,15 +998,174 @@
   EXPECT_EQ(value.c, 'B');
   EXPECT_EQ(value.s, 45);
 }
+}  // namespace
 
-TEST_F(FlagTest, TestNonTriviallyCopyableUDT) {
-  NonTriviallyCopyableUDT value = absl::GetFlag(FLAGS_test_flag_ntc_udt);
-  EXPECT_EQ(value.c, 'A');
+// --------------------------------------------------------------------
 
-  value.c = 'B';
-  absl::SetFlag(&FLAGS_test_flag_ntc_udt, value);
-  value = absl::GetFlag(FLAGS_test_flag_ntc_udt);
-  EXPECT_EQ(value.c, 'B');
+namespace {
+
+// User-defined not trivially copyable type.
+template <int id>
+struct NonTriviallyCopyableUDT {
+  NonTriviallyCopyableUDT() : c('A') { s_num_instance++; }
+  NonTriviallyCopyableUDT(const NonTriviallyCopyableUDT& rhs) : c(rhs.c) {
+    s_num_instance++;
+  }
+  NonTriviallyCopyableUDT& operator=(const NonTriviallyCopyableUDT& rhs) {
+    c = rhs.c;
+    return *this;
+  }
+  ~NonTriviallyCopyableUDT() { s_num_instance--; }
+
+  static uint64_t s_num_instance;
+  char c;
+};
+
+template <int id>
+uint64_t NonTriviallyCopyableUDT<id>::s_num_instance = 0;
+
+template <int id>
+bool AbslParseFlag(absl::string_view txt, NonTriviallyCopyableUDT<id>* f,
+                   std::string*) {
+  f->c = txt.empty() ? '\0' : txt[0];
+  return true;
+}
+template <int id>
+std::string AbslUnparseFlag(const NonTriviallyCopyableUDT<id>&) {
+  return "";
+}
+
+template <int id, typename F>
+void TestExpectedLeaks(
+    F&& f, uint64_t num_leaks,
+    absl::optional<uint64_t> num_new_instances = absl::nullopt) {
+  if (!num_new_instances.has_value()) num_new_instances = num_leaks;
+
+  auto num_leaked_before = flags::NumLeakedFlagValues();
+  auto num_instances_before = NonTriviallyCopyableUDT<id>::s_num_instance;
+  f();
+  EXPECT_EQ(num_leaked_before + num_leaks, flags::NumLeakedFlagValues());
+  EXPECT_EQ(num_instances_before + num_new_instances.value(),
+            NonTriviallyCopyableUDT<id>::s_num_instance);
+}
+}  // namespace
+
+ABSL_FLAG(NonTriviallyCopyableUDT<1>, test_flag_ntc_udt1, {}, "help");
+ABSL_FLAG(NonTriviallyCopyableUDT<2>, test_flag_ntc_udt2, {}, "help");
+ABSL_FLAG(NonTriviallyCopyableUDT<3>, test_flag_ntc_udt3, {}, "help");
+ABSL_FLAG(NonTriviallyCopyableUDT<4>, test_flag_ntc_udt4, {}, "help");
+ABSL_FLAG(NonTriviallyCopyableUDT<5>, test_flag_ntc_udt5, {}, "help");
+
+namespace {
+
+TEST_F(FlagTest, TestNonTriviallyCopyableGetSetSet) {
+  EXPECT_EQ(flags::StorageKind<NonTriviallyCopyableUDT<1>>(),
+            flags::FlagValueStorageKind::kHeapAllocated);
+
+  TestExpectedLeaks<1>(
+      [&] {
+        NonTriviallyCopyableUDT<1> value =
+            absl::GetFlag(FLAGS_test_flag_ntc_udt1);
+        EXPECT_EQ(value.c, 'A');
+      },
+      0);
+
+  TestExpectedLeaks<1>(
+      [&] {
+        NonTriviallyCopyableUDT<1> value;
+        value.c = 'B';
+        absl::SetFlag(&FLAGS_test_flag_ntc_udt1, value);
+        EXPECT_EQ(value.c, 'B');
+      },
+      1);
+
+  TestExpectedLeaks<1>(
+      [&] {
+        NonTriviallyCopyableUDT<1> value;
+        value.c = 'C';
+        absl::SetFlag(&FLAGS_test_flag_ntc_udt1, value);
+      },
+      0);
+}
+
+TEST_F(FlagTest, TestNonTriviallyCopyableParseSet) {
+  TestExpectedLeaks<2>(
+      [&] {
+        const char* in_argv[] = {"testbin", "--test_flag_ntc_udt2=A"};
+        absl::ParseCommandLine(2, const_cast<char**>(in_argv));
+      },
+      0);
+
+  TestExpectedLeaks<2>(
+      [&] {
+        NonTriviallyCopyableUDT<2> value;
+        value.c = 'B';
+        absl::SetFlag(&FLAGS_test_flag_ntc_udt2, value);
+        EXPECT_EQ(value.c, 'B');
+      },
+      0);
+}
+
+TEST_F(FlagTest, TestNonTriviallyCopyableSet) {
+  TestExpectedLeaks<3>(
+      [&] {
+        NonTriviallyCopyableUDT<3> value;
+        value.c = 'B';
+        absl::SetFlag(&FLAGS_test_flag_ntc_udt3, value);
+        EXPECT_EQ(value.c, 'B');
+      },
+      0);
+}
+
+// One new instance created during initialization and stored in the flag.
+auto premain_utd4_get =
+    (TestExpectedLeaks<4>([] { (void)absl::GetFlag(FLAGS_test_flag_ntc_udt4); },
+                          0, 1),
+     false);
+
+TEST_F(FlagTest, TestNonTriviallyCopyableGetBeforeMainParseGet) {
+  TestExpectedLeaks<4>(
+      [&] {
+        const char* in_argv[] = {"testbin", "--test_flag_ntc_udt4=C"};
+        absl::ParseCommandLine(2, const_cast<char**>(in_argv));
+      },
+      1);
+
+  TestExpectedLeaks<4>(
+      [&] {
+        NonTriviallyCopyableUDT<4> value =
+            absl::GetFlag(FLAGS_test_flag_ntc_udt4);
+        EXPECT_EQ(value.c, 'C');
+      },
+      0);
+}
+
+// One new instance created during initialization, which is reused since it was
+// never read.
+auto premain_utd5_set = (TestExpectedLeaks<5>(
+                             [] {
+                               NonTriviallyCopyableUDT<5> value;
+                               value.c = 'B';
+                               absl::SetFlag(&FLAGS_test_flag_ntc_udt5, value);
+                             },
+                             0, 1),
+                         false);
+
+TEST_F(FlagTest, TestNonTriviallyCopyableSetParseGet) {
+  TestExpectedLeaks<5>(
+      [&] {
+        const char* in_argv[] = {"testbin", "--test_flag_ntc_udt5=C"};
+        absl::ParseCommandLine(2, const_cast<char**>(in_argv));
+      },
+      0);
+
+  TestExpectedLeaks<5>(
+      [&] {
+        NonTriviallyCopyableUDT<5> value =
+            absl::GetFlag(FLAGS_test_flag_ntc_udt5);
+        EXPECT_EQ(value.c, 'C');
+      },
+      0);
 }
 
 }  // namespace
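
For context, a user-level flag exercising the kHeapAllocated storage kind tested above could look like the following sketch (hypothetical flag name; std::vector<std::string> is not trivially copyable and already has flag marshalling support, so it takes this storage path):

    #include <string>
    #include <vector>
    #include "absl/flags/flag.h"

    // Hypothetical flag: any non-trivially-copyable value type ends up in the
    // new kHeapAllocated storage kind (formerly kAlignedBuffer).
    ABSL_FLAG(std::vector<std::string>, example_names, {}, "names to process");

    std::vector<std::string> ReadNames() {
      // The very first read happens under the data guard and marks the value
      // as read; later reads of a default or command-line value skip the lock.
      return absl::GetFlag(FLAGS_example_names);
    }
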
@@ -1044,13 +1216,7 @@
 
 // --------------------------------------------------------------------
 
-#if defined(__GNUC__) && !defined(__clang__) && __GNUC__ <= 5
-#define ABSL_SKIP_OPTIONAL_BOOL_TEST_DUE_TO_GCC_BUG
-#endif
-
-#ifndef ABSL_SKIP_OPTIONAL_BOOL_TEST_DUE_TO_GCC_BUG
 ABSL_FLAG(absl::optional<bool>, optional_bool, absl::nullopt, "help");
-#endif
 ABSL_FLAG(absl::optional<int>, optional_int, {}, "help");
 ABSL_FLAG(absl::optional<double>, optional_double, 9.3, "help");
 ABSL_FLAG(absl::optional<std::string>, optional_string, absl::nullopt, "help");
@@ -1064,7 +1230,6 @@
 
 namespace {
 
-#ifndef ABSL_SKIP_OPTIONAL_BOOL_TEST_DUE_TO_GCC_BUG
 TEST_F(FlagTest, TestOptionalBool) {
   EXPECT_FALSE(absl::GetFlag(FLAGS_optional_bool).has_value());
   EXPECT_EQ(absl::GetFlag(FLAGS_optional_bool), absl::nullopt);
@@ -1083,7 +1248,6 @@
 }
 
 // --------------------------------------------------------------------
-#endif
 
 TEST_F(FlagTest, TestOptionalInt) {
   EXPECT_FALSE(absl::GetFlag(FLAGS_optional_int).has_value());
diff --git a/absl/flags/internal/flag.cc b/absl/flags/internal/flag.cc
index 65d0e58..981f19f 100644
--- a/absl/flags/internal/flag.cc
+++ b/absl/flags/internal/flag.cc
@@ -22,14 +22,17 @@
 
 #include <array>
 #include <atomic>
+#include <cstring>
 #include <memory>
-#include <new>
 #include <string>
 #include <typeinfo>
+#include <vector>
 
+#include "absl/base/attributes.h"
 #include "absl/base/call_once.h"
 #include "absl/base/casts.h"
 #include "absl/base/config.h"
+#include "absl/base/const_init.h"
 #include "absl/base/dynamic_annotations.h"
 #include "absl/base/optimization.h"
 #include "absl/flags/config.h"
@@ -44,10 +47,9 @@
 ABSL_NAMESPACE_BEGIN
 namespace flags_internal {
 
-// The help message indicating that the commandline flag has been
-// 'stripped'. It will not show up when doing "-help" and its
-// variants. The flag is stripped if ABSL_FLAGS_STRIP_HELP is set to 1
-// before including absl/flags/flag.h
+// The help message indicating that the commandline flag has been stripped. It
+// will not show up when doing "-help" and its variants. The flag is stripped
+// if ABSL_FLAGS_STRIP_HELP is set to 1 before including absl/flags/flag.h
 const char kStrippedFlagHelp[] = "\001\002\003\004 (unknown) \004\003\002\001";
 
 namespace {
@@ -78,9 +80,32 @@
   absl::Mutex& mu_;
 };
 
+// This is a freelist of leaked flag values and a guard for its access.
+// When we can't guarantee it is safe to reuse the memory for a flag value,
+// we move the memory to the freelist, where it lives indefinitely, so it can
+// still be safely accessed. Keeping these pointers also stops leak checkers
+// from complaining about memory that would otherwise become unreachable.
+ABSL_CONST_INIT absl::Mutex s_freelist_guard(absl::kConstInit);
+ABSL_CONST_INIT std::vector<void*>* s_freelist = nullptr;
+
+void AddToFreelist(void* p) {
+  absl::MutexLock l(&s_freelist_guard);
+  if (!s_freelist) {
+    s_freelist = new std::vector<void*>;
+  }
+  s_freelist->push_back(p);
+}
+
 }  // namespace
 
 ///////////////////////////////////////////////////////////////////////////////
+
+uint64_t NumLeakedFlagValues() {
+  absl::MutexLock l(&s_freelist_guard);
+  return s_freelist == nullptr ? 0u : s_freelist->size();
+}
+
+///////////////////////////////////////////////////////////////////////////////
 // Persistent state of the flag data.
 
 class FlagImpl;
@@ -97,7 +122,7 @@
         counter_(counter) {}
 
   ~FlagState() override {
-    if (flag_impl_.ValueStorageKind() != FlagValueStorageKind::kAlignedBuffer &&
+    if (flag_impl_.ValueStorageKind() != FlagValueStorageKind::kHeapAllocated &&
         flag_impl_.ValueStorageKind() != FlagValueStorageKind::kSequenceLocked)
       return;
     flags_internal::Delete(flag_impl_.op_, value_.heap_allocated);
@@ -140,6 +165,33 @@
   Delete(op, ptr);
 }
 
+MaskedPointer::MaskedPointer(ptr_t rhs, bool is_candidate) : ptr_(rhs) {
+  if (is_candidate) {
+    ApplyMask(kUnprotectedReadCandidate);
+  }
+}
+
+bool MaskedPointer::IsUnprotectedReadCandidate() const {
+  return CheckMask(kUnprotectedReadCandidate);
+}
+
+bool MaskedPointer::HasBeenRead() const { return CheckMask(kHasBeenRead); }
+
+void MaskedPointer::Set(FlagOpFn op, const void* src, bool is_candidate) {
+  flags_internal::Copy(op, src, Ptr());
+  if (is_candidate) {
+    ApplyMask(kUnprotectedReadCandidate);
+  }
+}
+void MaskedPointer::MarkAsRead() { ApplyMask(kHasBeenRead); }
+
+void MaskedPointer::ApplyMask(mask_t mask) {
+  ptr_ = reinterpret_cast<ptr_t>(reinterpret_cast<mask_t>(ptr_) | mask);
+}
+bool MaskedPointer::CheckMask(mask_t mask) const {
+  return (reinterpret_cast<mask_t>(ptr_) & mask) != 0;
+}
+
 void FlagImpl::Init() {
   new (&data_guard_) absl::Mutex;
 
@@ -174,11 +226,16 @@
       (*default_value_.gen_func)(AtomicBufferValue());
       break;
     }
-    case FlagValueStorageKind::kAlignedBuffer:
+    case FlagValueStorageKind::kHeapAllocated:
       // For this storage kind the default_value_ always points to gen_func
       // during initialization.
       assert(def_kind == FlagDefaultKind::kGenFunc);
-      (*default_value_.gen_func)(AlignedBufferValue());
+      // Flag value initially points to the internal buffer.
+      MaskedPointer ptr_value = PtrStorage().load(std::memory_order_acquire);
+      (*default_value_.gen_func)(ptr_value.Ptr());
+      // Default value is a candidate for an unprotected read.
+      PtrStorage().store(MaskedPointer(ptr_value.Ptr(), true),
+                         std::memory_order_release);
       break;
   }
   seq_lock_.MarkInitialized();
@@ -234,7 +291,7 @@
   return {res, DynValueDeleter{op_}};
 }
 
-void FlagImpl::StoreValue(const void* src) {
+void FlagImpl::StoreValue(const void* src, ValueSource source) {
   switch (ValueStorageKind()) {
     case FlagValueStorageKind::kValueAndInitBit:
     case FlagValueStorageKind::kOneWordAtomic: {
@@ -249,8 +306,27 @@
       seq_lock_.Write(AtomicBufferValue(), src, Sizeof(op_));
       break;
     }
-    case FlagValueStorageKind::kAlignedBuffer:
-      Copy(op_, src, AlignedBufferValue());
+    case FlagValueStorageKind::kHeapAllocated:
+      MaskedPointer ptr_value = PtrStorage().load(std::memory_order_acquire);
+
+      if (ptr_value.IsUnprotectedReadCandidate() && ptr_value.HasBeenRead()) {
+        // If the current value is a candidate for an unprotected read and was
+        // already read at least once, follow-up reads (if any) are done
+        // without mutex protection. We can't guarantee it is safe to reuse
+        // this memory since it may have been accessed concurrently by another
+        // thread, so instead we move the memory to the freelist, where it can
+        // still be safely accessed, and allocate new storage for the new value.
+        AddToFreelist(ptr_value.Ptr());
+        ptr_value = MaskedPointer(Clone(op_, src), source == kCommandLine);
+      } else {
+        // The current value was either set programmatically or never read.
+        // We can reuse the memory since all accesses to this value (if any)
+        // were protected by the mutex. That said, if the new value comes from
+        // the command line, it now becomes a candidate for an unprotected read.
+        ptr_value.Set(op_, src, source == kCommandLine);
+      }
+
+      PtrStorage().store(ptr_value, std::memory_order_release);
       seq_lock_.IncrementModificationCount();
       break;
   }
@@ -305,9 +381,10 @@
       ReadSequenceLockedData(cloned.get());
       return flags_internal::Unparse(op_, cloned.get());
     }
-    case FlagValueStorageKind::kAlignedBuffer: {
+    case FlagValueStorageKind::kHeapAllocated: {
       absl::MutexLock l(guard);
-      return flags_internal::Unparse(op_, AlignedBufferValue());
+      return flags_internal::Unparse(
+          op_, PtrStorage().load(std::memory_order_acquire).Ptr());
     }
   }
 
@@ -370,10 +447,12 @@
       return absl::make_unique<FlagState>(*this, cloned, modified,
                                           on_command_line, ModificationCount());
     }
-    case FlagValueStorageKind::kAlignedBuffer: {
+    case FlagValueStorageKind::kHeapAllocated: {
       return absl::make_unique<FlagState>(
-          *this, flags_internal::Clone(op_, AlignedBufferValue()), modified,
-          on_command_line, ModificationCount());
+          *this,
+          flags_internal::Clone(
+              op_, PtrStorage().load(std::memory_order_acquire).Ptr()),
+          modified, on_command_line, ModificationCount());
     }
   }
   return nullptr;
@@ -388,11 +467,11 @@
   switch (ValueStorageKind()) {
     case FlagValueStorageKind::kValueAndInitBit:
     case FlagValueStorageKind::kOneWordAtomic:
-      StoreValue(&flag_state.value_.one_word);
+      StoreValue(&flag_state.value_.one_word, kProgrammaticChange);
       break;
     case FlagValueStorageKind::kSequenceLocked:
-    case FlagValueStorageKind::kAlignedBuffer:
-      StoreValue(flag_state.value_.heap_allocated);
+    case FlagValueStorageKind::kHeapAllocated:
+      StoreValue(flag_state.value_.heap_allocated, kProgrammaticChange);
       break;
   }
 
@@ -411,11 +490,6 @@
   return reinterpret_cast<StorageT*>(p + offset);
 }
 
-void* FlagImpl::AlignedBufferValue() const {
-  assert(ValueStorageKind() == FlagValueStorageKind::kAlignedBuffer);
-  return OffsetValue<void>();
-}
-
 std::atomic<uint64_t>* FlagImpl::AtomicBufferValue() const {
   assert(ValueStorageKind() == FlagValueStorageKind::kSequenceLocked);
   return OffsetValue<std::atomic<uint64_t>>();
@@ -427,6 +501,11 @@
   return OffsetValue<FlagOneWordValue>()->value;
 }
 
+std::atomic<MaskedPointer>& FlagImpl::PtrStorage() const {
+  assert(ValueStorageKind() == FlagValueStorageKind::kHeapAllocated);
+  return OffsetValue<FlagMaskedPointerValue>()->value;
+}
+
 // Attempts to parse supplied `value` string using parsing routine in the `flag`
 // argument. If parsing successful, this function replaces the dst with newly
 // parsed value. In case if any error is encountered in either step, the error
@@ -460,9 +539,17 @@
       ReadSequenceLockedData(dst);
       break;
     }
-    case FlagValueStorageKind::kAlignedBuffer: {
+    case FlagValueStorageKind::kHeapAllocated: {
       absl::MutexLock l(guard);
-      flags_internal::CopyConstruct(op_, AlignedBufferValue(), dst);
+      MaskedPointer ptr_value = PtrStorage().load(std::memory_order_acquire);
+
+      flags_internal::CopyConstruct(op_, ptr_value.Ptr(), dst);
+
+      // For unprotected read candidates, mark that the value has been read.
+      if (ptr_value.IsUnprotectedReadCandidate() && !ptr_value.HasBeenRead()) {
+        ptr_value.MarkAsRead();
+        PtrStorage().store(ptr_value, std::memory_order_release);
+      }
       break;
     }
   }
@@ -513,7 +600,7 @@
     }
   }
 
-  StoreValue(src);
+  StoreValue(src, kProgrammaticChange);
 }
 
 // Sets the value of the flag based on specified string `value`. If the flag
@@ -534,7 +621,7 @@
       auto tentative_value = TryParse(value, err);
       if (!tentative_value) return false;
 
-      StoreValue(tentative_value.get());
+      StoreValue(tentative_value.get(), source);
 
       if (source == kCommandLine) {
         on_command_line_ = true;
@@ -555,7 +642,7 @@
       auto tentative_value = TryParse(value, err);
       if (!tentative_value) return false;
 
-      StoreValue(tentative_value.get());
+      StoreValue(tentative_value.get(), source);
       break;
     }
     case SET_FLAGS_DEFAULT: {
@@ -573,7 +660,7 @@
 
       if (!modified_) {
         // Need to set both default value *and* current, in this case.
-        StoreValue(default_value_.dynamic_value);
+        StoreValue(default_value_.dynamic_value, source);
         modified_ = false;
       }
       break;
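
As a reading aid for the StoreValue() change above, here is a simplified standalone sketch of the reuse-versus-park decision made for kHeapAllocated storage (illustrative only; the enum and function names are not part of the Abseil API):

    // Models the decision in FlagImpl::StoreValue for kHeapAllocated storage.
    // "candidate" means the value came from the default or the command line;
    // "read" means at least one Get() has observed it.
    enum class StoreAction { kReuseInPlace, kParkOldAndClone };

    StoreAction ChooseStoreAction(bool is_candidate, bool has_been_read) {
      // Once a candidate value has been read, later reads may happen without
      // the mutex, so its memory can never be reused: park the old pointer on
      // the freelist and clone the new value into fresh storage.
      if (is_candidate && has_been_read) return StoreAction::kParkOldAndClone;
      // Otherwise every access so far was mutex-protected and the existing
      // buffer can be overwritten in place.
      return StoreAction::kReuseInPlace;
    }
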
diff --git a/absl/flags/internal/flag.h b/absl/flags/internal/flag.h
index 2e6e6b8..a0be31d 100644
--- a/absl/flags/internal/flag.h
+++ b/absl/flags/internal/flag.h
@@ -22,7 +22,6 @@
 #include <atomic>
 #include <cstring>
 #include <memory>
-#include <new>
 #include <string>
 #include <type_traits>
 #include <typeinfo>
@@ -296,11 +295,8 @@
 }
 
 ///////////////////////////////////////////////////////////////////////////////
-// Flag current value auxiliary structs.
-
-constexpr int64_t UninitializedFlagValue() {
-  return static_cast<int64_t>(0xababababababababll);
-}
+// Flag storage selector traits. Each trait indicates which storage kind to
+// use for the flag value.
 
 template <typename T>
 using FlagUseValueAndInitBitStorage =
@@ -322,9 +318,11 @@
   kValueAndInitBit = 0,
   kOneWordAtomic = 1,
   kSequenceLocked = 2,
-  kAlignedBuffer = 3,
+  kHeapAllocated = 3,
 };
 
+// This constexpr function returns the storage kind for the given flag value
+// type.
 template <typename T>
 static constexpr FlagValueStorageKind StorageKind() {
   return FlagUseValueAndInitBitStorage<T>::value
@@ -333,14 +331,24 @@
              ? FlagValueStorageKind::kOneWordAtomic
          : FlagUseSequenceLockStorage<T>::value
              ? FlagValueStorageKind::kSequenceLocked
-             : FlagValueStorageKind::kAlignedBuffer;
+             : FlagValueStorageKind::kHeapAllocated;
 }
 
+// This is a base class for the storage classes used by kOneWordAtomic and
+// kValueAndInitBit storage kinds. It literally just stores the one-word value
+// as an atomic. By default, it is initialized to a magic value that is
+// unlikely to be a valid value for the flag value type.
 struct FlagOneWordValue {
+  constexpr static int64_t Uninitialized() {
+    return static_cast<int64_t>(0xababababababababll);
+  }
+
+  constexpr FlagOneWordValue() : value(Uninitialized()) {}
   constexpr explicit FlagOneWordValue(int64_t v) : value(v) {}
   std::atomic<int64_t> value;
 };
 
+// This class represents a memory layout used by kValueAndInitBit storage kind.
 template <typename T>
 struct alignas(8) FlagValueAndInitBit {
   T value;
@@ -349,16 +357,91 @@
   uint8_t init;
 };
 
+// This class implements an aligned pointer with two options stored via masks
+// in unused bits of the pointer value (freed up by its alignment requirement).
+//  - IsUnprotectedReadCandidate - indicates that the value can be switched to
+//    unprotected (lock-free) reads.
+//  - HasBeenRead - indicates that the value has been read at least once.
+//  - AllowsUnprotectedRead - combination of the two options above; indicates
+//    that the value can now be read without a lock.
+// Further details of these options and their use are covered in the
+// description of the FlagValue<T, FlagValueStorageKind::kHeapAllocated>
+// specialization.
+class MaskedPointer {
+ public:
+  using mask_t = uintptr_t;
+  using ptr_t = void*;
+
+  static constexpr int RequiredAlignment() { return 4; }
+
+  constexpr explicit MaskedPointer(ptr_t rhs) : ptr_(rhs) {}
+  MaskedPointer(ptr_t rhs, bool is_candidate);
+
+  void* Ptr() const {
+    return reinterpret_cast<void*>(reinterpret_cast<mask_t>(ptr_) &
+                                   kPtrValueMask);
+  }
+  bool AllowsUnprotectedRead() const {
+    return (reinterpret_cast<mask_t>(ptr_) & kAllowsUnprotectedRead) ==
+           kAllowsUnprotectedRead;
+  }
+  bool IsUnprotectedReadCandidate() const;
+  bool HasBeenRead() const;
+
+  void Set(FlagOpFn op, const void* src, bool is_candidate);
+  void MarkAsRead();
+
+ private:
+  // Masks
+  // Indicates that the flag value either default or originated from command
+  // line.
+  static constexpr mask_t kUnprotectedReadCandidate = 0x1u;
+  // Indicates that flag has been read.
+  static constexpr mask_t kHasBeenRead = 0x2u;
+  static constexpr mask_t kAllowsUnprotectedRead =
+      kUnprotectedReadCandidate | kHasBeenRead;
+  static constexpr mask_t kPtrValueMask = ~kAllowsUnprotectedRead;
+
+  void ApplyMask(mask_t mask);
+  bool CheckMask(mask_t mask) const;
+
+  ptr_t ptr_;
+};
+
+// This class implements type-erased storage for a heap-allocated flag value.
+// It is used as a base class for the storage class of the kHeapAllocated
+// storage kind. The initial_buffer is expected to have an alignment of at
+// least MaskedPointer::RequiredAlignment(), so that the bits used by the
+// MaskedPointer to store masks are set to 0. This guarantees that the value
+// starts in an uninitialized state.
+struct FlagMaskedPointerValue {
+  constexpr explicit FlagMaskedPointerValue(MaskedPointer::ptr_t initial_buffer)
+      : value(MaskedPointer(initial_buffer)) {}
+
+  std::atomic<MaskedPointer> value;
+};
+
+// This is the forward declaration for the template that represents the
+// storage for flag values. This template is expected to be explicitly
+// specialized for each storage kind, and it does not have a generic default
+// implementation.
 template <typename T,
           FlagValueStorageKind Kind = flags_internal::StorageKind<T>()>
 struct FlagValue;
 
+// This specialization represents the storage of flag value types with the
+// kValueAndInitBit storage kind. It is based on the FlagOneWordValue class
+// and relies on the memory layout of FlagValueAndInitBit<T> to indicate
+// whether the value has been initialized.
 template <typename T>
 struct FlagValue<T, FlagValueStorageKind::kValueAndInitBit> : FlagOneWordValue {
   constexpr FlagValue() : FlagOneWordValue(0) {}
   bool Get(const SequenceLock&, T& dst) const {
     int64_t storage = value.load(std::memory_order_acquire);
     if (ABSL_PREDICT_FALSE(storage == 0)) {
+      // This assert is to ensure that the initialization inside FlagImpl::Init
+      // is able to set the init member correctly.
+      static_assert(offsetof(FlagValueAndInitBit<T>, init) == sizeof(T),
+                    "Unexpected memory layout of FlagValueAndInitBit");
       return false;
     }
     dst = absl::bit_cast<FlagValueAndInitBit<T>>(storage).value;
@@ -366,12 +449,16 @@
   }
 };
 
+// This specialization represents the storage of flag value types with the
+// kOneWordAtomic storage kind. It is based on the FlagOneWordValue class
+// and relies on the magic uninitialized state of a default-constructed
+// FlagOneWordValue to indicate whether the value has been initialized.
 template <typename T>
 struct FlagValue<T, FlagValueStorageKind::kOneWordAtomic> : FlagOneWordValue {
-  constexpr FlagValue() : FlagOneWordValue(UninitializedFlagValue()) {}
+  constexpr FlagValue() : FlagOneWordValue() {}
   bool Get(const SequenceLock&, T& dst) const {
     int64_t one_word_val = value.load(std::memory_order_acquire);
-    if (ABSL_PREDICT_FALSE(one_word_val == UninitializedFlagValue())) {
+    if (ABSL_PREDICT_FALSE(one_word_val == FlagOneWordValue::Uninitialized())) {
       return false;
     }
     std::memcpy(&dst, static_cast<const void*>(&one_word_val), sizeof(T));
@@ -379,6 +466,12 @@
   }
 };
 
+// This specialization represents the storage of flag value types with the
+// kSequenceLocked storage kind. This storage is used by trivially copyable
+// types with a size greater than 8 bytes. It relies on the uninitialized
+// state of the SequenceLock to indicate whether the value has been
+// initialized. It also provides lock-free read access to the underlying
+// value once it is initialized.
 template <typename T>
 struct FlagValue<T, FlagValueStorageKind::kSequenceLocked> {
   bool Get(const SequenceLock& lock, T& dst) const {
@@ -392,11 +485,62 @@
       std::atomic<uint64_t>) std::atomic<uint64_t> value_words[kNumWords];
 };
 
+// This specialization represents the storage of flag value types with the
+// kHeapAllocated storage kind. This is the storage of last resort and is used
+// if none of the other storage kinds is applicable.
+//
+// Generally speaking, values with this storage kind can't be accessed
+// atomically and thus can't be read without holding a lock. If we ever wanted
+// to avoid the lock, we'd need to leak the old value every time a new flag
+// value is set (since we would otherwise risk a race condition).
+//
+// Instead of doing that, this implementation attempts to cater to some common
+// use cases by allowing at most 2 values to be leaked: the default value and
+// the value set from the command line.
+//
+// This specialization provides an initial buffer for the first flag value.
+// This is where the default value is stored. We attempt to reuse this buffer
+// if possible, including storing the value set from the command line there.
+//
+// As long as we only read this value, we can access it without a lock (in
+// practice we still take the lock for the very first read so we can set the
+// "has been read" option on this flag).
+//
+// If the flag is specified on the command line, we store the parsed value
+// either in the internal buffer (if the default value has never been read) or
+// we leak the default value and allocate new storage for the parsed value.
+// This value is also a candidate for an unprotected read. If the flag is set
+// programmatically after the command line is parsed, the storage for this
+// value is going to be leaked. Note that neither scenario is a real leak:
+// we store the leaked value pointers in the internal freelist so the memory
+// leak checker does not complain.
+//
+// If the flag is ever set programmatically, it stops being a candidate for an
+// unprotected read, and any follow-up access to the flag value requires a
+// lock. Note that if the value is set programmatically before the command
+// line is parsed, we can switch back to allowing unprotected reads for that
+// value.
 template <typename T>
-struct FlagValue<T, FlagValueStorageKind::kAlignedBuffer> {
-  bool Get(const SequenceLock&, T&) const { return false; }
+struct FlagValue<T, FlagValueStorageKind::kHeapAllocated>
+    : FlagMaskedPointerValue {
+  // We const-initialize the value with an unmasked pointer to the internal
+  // buffer, making sure it is not a candidate for an unprotected read. This
+  // way we can ensure Init is done before any access to the flag value.
+  constexpr FlagValue() : FlagMaskedPointerValue(&buffer[0]) {}
 
-  alignas(T) char value[sizeof(T)];
+  bool Get(const SequenceLock&, T& dst) const {
+    MaskedPointer ptr_value = value.load(std::memory_order_acquire);
+
+    if (ABSL_PREDICT_TRUE(ptr_value.AllowsUnprotectedRead())) {
+      ::new (static_cast<void*>(&dst)) T(*static_cast<T*>(ptr_value.Ptr()));
+      return true;
+    }
+    return false;
+  }
+
+  alignas(MaskedPointer::RequiredAlignment()) alignas(
+      T) char buffer[sizeof(T)]{};
 };
 
 ///////////////////////////////////////////////////////////////////////////////
@@ -425,6 +569,13 @@
 
 class FlagState;
 
+// These are only used as constexpr global objects.
+// They do not use a virtual destructor to simplify their implementation.
+// They are not destroyed except at program exit, so leaks do not matter.
+#if defined(__GNUC__) && !defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wnon-virtual-dtor"
+#endif
 class FlagImpl final : public CommandLineFlag {
  public:
   constexpr FlagImpl(const char* name, const char* filename, FlagOpFn op,
@@ -477,7 +628,7 @@
   // Used in read/write operations to validate source/target has correct type.
   // For example if flag is declared as absl::Flag<int> FLAGS_foo, a call to
   // absl::GetFlag(FLAGS_foo) validates that the type of FLAGS_foo is indeed
-  // int. To do that we pass the "assumed" type id (which is deduced from type
+  // int. To do that we pass the assumed type id (which is deduced from type
   // int) as an argument `type_id`, which is in turn is validated against the
   // type id stored in flag object by flag definition statement.
   void AssertValidType(FlagFastTypeId type_id,
@@ -498,17 +649,13 @@
   void Init();
 
   // Offset value access methods. One per storage kind. These methods to not
-  // respect const correctness, so be very carefull using them.
+  // respect const correctness, so be very careful using them.
 
   // This is a shared helper routine which encapsulates most of the magic. Since
   // it is only used inside the three routines below, which are defined in
   // flag.cc, we can define it in that file as well.
   template <typename StorageT>
   StorageT* OffsetValue() const;
-  // This is an accessor for a value stored in an aligned buffer storage
-  // used for non-trivially-copyable data types.
-  // Returns a mutable pointer to the start of a buffer.
-  void* AlignedBufferValue() const;
 
   // The same as above, but used for sequencelock-protected storage.
   std::atomic<uint64_t>* AtomicBufferValue() const;
@@ -517,13 +664,16 @@
   // mutable reference to an atomic value.
   std::atomic<int64_t>& OneWordValue() const;
 
+  std::atomic<MaskedPointer>& PtrStorage() const;
+
   // Attempts to parse supplied `value` string. If parsing is successful,
   // returns new value. Otherwise returns nullptr.
   std::unique_ptr<void, DynValueDeleter> TryParse(absl::string_view value,
                                                   std::string& err) const
       ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard());
   // Stores the flag value based on the pointer to the source.
-  void StoreValue(const void* src) ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard());
+  void StoreValue(const void* src, ValueSource source)
+      ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard());
 
   // Copy the flag data, protected by `seq_lock_` into `dst`.
   //
@@ -579,7 +729,7 @@
   const char* const name_;
   // The file name where ABSL_FLAG resides.
   const char* const filename_;
-  // Type-specific operations "vtable".
+  // Type-specific operations vtable.
   const FlagOpFn op_;
   // Help message literal or function to generate it.
   const FlagHelpMsg help_;
@@ -624,6 +774,9 @@
   // problems.
   alignas(absl::Mutex) mutable char data_guard_[sizeof(absl::Mutex)];
 };
+#if defined(__GNUC__) && !defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
 
 ///////////////////////////////////////////////////////////////////////////////
 // The Flag object parameterized by the flag's value type. This class implements
@@ -711,16 +864,21 @@
 // Implementation of Flag value specific operations routine.
 template <typename T>
 void* FlagOps(FlagOp op, const void* v1, void* v2, void* v3) {
+  struct AlignedSpace {
+    alignas(MaskedPointer::RequiredAlignment()) alignas(T) char buf[sizeof(T)];
+  };
+  using Allocator = std::allocator<AlignedSpace>;
   switch (op) {
     case FlagOp::kAlloc: {
-      std::allocator<T> alloc;
-      return std::allocator_traits<std::allocator<T>>::allocate(alloc, 1);
+      Allocator alloc;
+      return std::allocator_traits<Allocator>::allocate(alloc, 1);
     }
     case FlagOp::kDelete: {
       T* p = static_cast<T*>(v2);
       p->~T();
-      std::allocator<T> alloc;
-      std::allocator_traits<std::allocator<T>>::deallocate(alloc, p, 1);
+      Allocator alloc;
+      std::allocator_traits<Allocator>::deallocate(
+          alloc, reinterpret_cast<AlignedSpace*>(p), 1);
       return nullptr;
     }
     case FlagOp::kCopy:
@@ -754,8 +912,7 @@
       // Round sizeof(FlagImp) to a multiple of alignof(FlagValue<T>) to get the
       // offset of the data.
       size_t round_to = alignof(FlagValue<T>);
-      size_t offset =
-          (sizeof(FlagImpl) + round_to - 1) / round_to * round_to;
+      size_t offset = (sizeof(FlagImpl) + round_to - 1) / round_to * round_to;
       return reinterpret_cast<void*>(offset);
     }
   }
@@ -770,7 +927,8 @@
 template <typename T, bool do_register>
 class FlagRegistrar {
  public:
-  explicit FlagRegistrar(Flag<T>& flag, const char* filename) : flag_(flag) {
+  constexpr explicit FlagRegistrar(Flag<T>& flag, const char* filename)
+      : flag_(flag) {
     if (do_register)
       flags_internal::RegisterCommandLineFlag(flag_.impl_, filename);
   }
@@ -780,15 +938,19 @@
     return *this;
   }
 
-  // Make the registrar "die" gracefully as an empty struct on a line where
+  // Makes the registrar die gracefully as an empty struct on a line where
   // registration happens. Registrar objects are intended to live only as
   // temporary.
-  operator FlagRegistrarEmpty() const { return {}; }  // NOLINT
+  constexpr operator FlagRegistrarEmpty() const { return {}; }  // NOLINT
 
  private:
   Flag<T>& flag_;  // Flag being registered (not owned).
 };
 
+///////////////////////////////////////////////////////////////////////////////
+// Test only API
+uint64_t NumLeakedFlagValues();
+
 }  // namespace flags_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
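
The MaskedPointer introduced above packs its two options into the low bits of a pointer aligned to at least MaskedPointer::RequiredAlignment() (4 bytes). A standalone illustration of that tag-bit scheme (not the Abseil type itself):

    #include <cassert>
    #include <cstdint>

    // With 4-byte (or stronger) alignment, pointer bits 0 and 1 are always
    // zero and can carry the two per-value options.
    constexpr std::uintptr_t kCandidateBit = 0x1u;  // default/command-line value
    constexpr std::uintptr_t kReadBit = 0x2u;       // value has been read once
    constexpr std::uintptr_t kTagMask = kCandidateBit | kReadBit;

    std::uintptr_t Tag(void* p, bool candidate, bool read) {
      auto bits = reinterpret_cast<std::uintptr_t>(p);
      assert((bits & kTagMask) == 0);  // requires sufficient alignment
      return bits | (candidate ? kCandidateBit : 0) | (read ? kReadBit : 0);
    }

    void* Untag(std::uintptr_t tagged) {
      return reinterpret_cast<void*>(tagged & ~kTagMask);
    }

    bool AllowsUnprotectedRead(std::uintptr_t tagged) {
      // Both options set: the value came from the default or command line and
      // has already been read once under the lock, so reads can skip the lock.
      return (tagged & kTagMask) == kTagMask;
    }
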
diff --git a/absl/flags/internal/usage_test.cc b/absl/flags/internal/usage_test.cc
index 6847386..9b6d730 100644
--- a/absl/flags/internal/usage_test.cc
+++ b/absl/flags/internal/usage_test.cc
@@ -22,6 +22,7 @@
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
+#include "absl/flags/config.h"
 #include "absl/flags/flag.h"
 #include "absl/flags/internal/parse.h"
 #include "absl/flags/internal/program_name.h"
@@ -97,6 +98,11 @@
     flags::SetFlagsHelpMatchSubstr("");
     flags::SetFlagsHelpFormat(flags::HelpFormat::kHumanReadable);
   }
+  void SetUp() override {
+#if ABSL_FLAGS_STRIP_NAMES
+    GTEST_SKIP() << "This test requires flag names to be present";
+#endif
+  }
 
  private:
   absl::FlagSaver flag_saver_;
diff --git a/absl/flags/parse_test.cc b/absl/flags/parse_test.cc
index 97b7898..9997069 100644
--- a/absl/flags/parse_test.cc
+++ b/absl/flags/parse_test.cc
@@ -25,6 +25,7 @@
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
 #include "absl/base/internal/scoped_set_env.h"
+#include "absl/flags/config.h"
 #include "absl/flags/flag.h"
 #include "absl/flags/internal/parse.h"
 #include "absl/flags/internal/usage.h"
@@ -43,31 +44,31 @@
 #define FLAG_MULT(x) F3(x)
 #define TEST_FLAG_HEADER FLAG_HEADER_
 
-#define F(name) ABSL_FLAG(int, name, 0, "");
+#define F(name) ABSL_FLAG(int, name, 0, "")
 
 #define F1(name) \
   F(name##1);    \
   F(name##2);    \
   F(name##3);    \
   F(name##4);    \
-  F(name##5);
+  F(name##5)
 /**/
 #define F2(name) \
   F1(name##1);   \
   F1(name##2);   \
   F1(name##3);   \
   F1(name##4);   \
-  F1(name##5);
+  F1(name##5)
 /**/
 #define F3(name) \
   F2(name##1);   \
   F2(name##2);   \
   F2(name##3);   \
   F2(name##4);   \
-  F2(name##5);
+  F2(name##5)
 /**/
 
-FLAG_MULT(TEST_FLAG_HEADER)
+FLAG_MULT(TEST_FLAG_HEADER);
 
 namespace {
 
@@ -243,6 +244,12 @@
  public:
   ~ParseTest() override { flags::SetFlagsHelpMode(flags::HelpMode::kNone); }
 
+  void SetUp() override {
+#if ABSL_FLAGS_STRIP_NAMES
+    GTEST_SKIP() << "This test requires flag names to be present";
+#endif
+  }
+
  private:
   absl::FlagSaver flag_saver_;
 };
diff --git a/absl/flags/reflection.cc b/absl/flags/reflection.cc
index 841921a..ea856ff 100644
--- a/absl/flags/reflection.cc
+++ b/absl/flags/reflection.cc
@@ -217,6 +217,13 @@
 
 namespace {
 
+// These are only used as constexpr global objects.
+// They do not use a virtual destructor to simplify their implementation.
+// They are not destroyed except at program exit, so leaks do not matter.
+#if defined(__GNUC__) && !defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wnon-virtual-dtor"
+#endif
 class RetiredFlagObj final : public CommandLineFlag {
  public:
   constexpr RetiredFlagObj(const char* name, FlagFastTypeId type_id)
@@ -276,6 +283,9 @@
   const char* const name_;
   const FlagFastTypeId type_id_;
 };
+#if defined(__GNUC__) && !defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
 
 }  // namespace
 
diff --git a/absl/flags/reflection_test.cc b/absl/flags/reflection_test.cc
index 79cfa90..68abeda 100644
--- a/absl/flags/reflection_test.cc
+++ b/absl/flags/reflection_test.cc
@@ -20,10 +20,8 @@
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
-#include "absl/flags/declare.h"
+#include "absl/flags/config.h"
 #include "absl/flags/flag.h"
-#include "absl/flags/internal/commandlineflag.h"
-#include "absl/flags/marshalling.h"
 #include "absl/memory/memory.h"
 #include "absl/strings/str_cat.h"
 #include "absl/strings/str_split.h"
@@ -36,7 +34,12 @@
 
 class ReflectionTest : public testing::Test {
  protected:
-  void SetUp() override { flag_saver_ = absl::make_unique<absl::FlagSaver>(); }
+  void SetUp() override {
+#if ABSL_FLAGS_STRIP_NAMES
+    GTEST_SKIP() << "This test requires flag names to be present";
+#endif
+    flag_saver_ = absl::make_unique<absl::FlagSaver>();
+  }
   void TearDown() override { flag_saver_.reset(); }
 
  private:
diff --git a/absl/functional/CMakeLists.txt b/absl/functional/CMakeLists.txt
index 602829c..38fea8b 100644
--- a/absl/functional/CMakeLists.txt
+++ b/absl/functional/CMakeLists.txt
@@ -129,6 +129,10 @@
   COPTS
     ${ABSL_TEST_COPTS}
   DEPS
+    absl::config
+    absl::overload
+    absl::string_view
     absl::strings
+    absl::variant
     GTest::gmock_main
 )
diff --git a/absl/functional/any_invocable.h b/absl/functional/any_invocable.h
index 68d8825..3acb9fd 100644
--- a/absl/functional/any_invocable.h
+++ b/absl/functional/any_invocable.h
@@ -34,6 +34,7 @@
 #define ABSL_FUNCTIONAL_ANY_INVOCABLE_H_
 
 #include <cstddef>
+#include <functional>
 #include <initializer_list>
 #include <type_traits>
 #include <utility>
@@ -98,9 +99,9 @@
 // `AnyInvocable` also properly respects `const` qualifiers, reference
 // qualifiers, and the `noexcept` specification (only in C++ 17 and beyond) as
 // part of the user-specified function type (e.g.
-// `AnyInvocable<void()&& const noexcept>`). These qualifiers will be applied to
-// the `AnyInvocable` object's `operator()`, and the underlying invocable must
-// be compatible with those qualifiers.
+// `AnyInvocable<void() const && noexcept>`). These qualifiers will be applied
+// to the `AnyInvocable` object's `operator()`, and the underlying invocable
+// must be compatible with those qualifiers.
 //
 // Comparison of const and non-const function types:
 //
@@ -151,6 +152,12 @@
 //
 // Attempting to call `absl::AnyInvocable` multiple times in such a case
 // results in undefined behavior.
+//
+// Invoking an empty `absl::AnyInvocable` results in undefined behavior:
+//
+//   // Create an empty instance using the default constructor.
+//   AnyInvocable<void()> empty;
+//   empty();  // WARNING: Undefined behavior!
 template <class Sig>
 class AnyInvocable : private internal_any_invocable::Impl<Sig> {
  private:
@@ -167,6 +174,7 @@
   // Constructors
 
   // Constructs the `AnyInvocable` in an empty state.
+  // Invoking it results in undefined behavior.
   AnyInvocable() noexcept = default;
   AnyInvocable(std::nullptr_t) noexcept {}  // NOLINT
 
@@ -277,6 +285,8 @@
   // In other words:
   //   std::function<void()> f;  // empty
   //   absl::AnyInvocable<void()> a = std::move(f);  // not empty
+  //
+  // Invoking an empty `AnyInvocable` results in undefined behavior.
   explicit operator bool() const noexcept { return this->HasValue(); }
 
   // Invokes the target object of `*this`. `*this` must not be empty.
diff --git a/absl/functional/bind_front.h b/absl/functional/bind_front.h
index a956eb0..885f24b 100644
--- a/absl/functional/bind_front.h
+++ b/absl/functional/bind_front.h
@@ -34,6 +34,8 @@
 #include <functional>  // For std::bind_front.
 #endif  // defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L
 
+#include <utility>
+
 #include "absl/functional/internal/front_binder.h"
 #include "absl/utility/utility.h"
 
@@ -182,8 +184,7 @@
 constexpr functional_internal::bind_front_t<F, BoundArgs...> bind_front(
     F&& func, BoundArgs&&... args) {
   return functional_internal::bind_front_t<F, BoundArgs...>(
-      absl::in_place, absl::forward<F>(func),
-      absl::forward<BoundArgs>(args)...);
+      absl::in_place, std::forward<F>(func), std::forward<BoundArgs>(args)...);
 }
 #endif  // defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L
 
diff --git a/absl/functional/internal/any_invocable.h b/absl/functional/internal/any_invocable.h
index b04436d..c2d8cd4 100644
--- a/absl/functional/internal/any_invocable.h
+++ b/absl/functional/internal/any_invocable.h
@@ -19,11 +19,11 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 //                                                                            //
-// This implementation of the proposed `any_invocable` uses an approach that  //
-// chooses between local storage and remote storage for the contained target  //
-// object based on the target object's size, alignment requirements, and      //
-// whether or not it has a nothrow move constructor. Additional optimizations //
-// are performed when the object is a trivially copyable type [basic.types].  //
+// This implementation chooses between local storage and remote storage for   //
+// the contained target object based on the target object's size, alignment   //
+// requirements, and whether or not it has a nothrow move constructor.        //
+// Additional optimizations are performed when the object is a trivially      //
+// copyable type [basic.types].                                               //
 //                                                                            //
 // There are three datamembers per `AnyInvocable` instance                    //
 //                                                                            //
@@ -39,7 +39,7 @@
 //    target object, directly returning the result.                           //
 //                                                                            //
 // When in the logically empty state, the manager function is an empty        //
-// function and the invoker function is one that would be undefined-behavior  //
+// function and the invoker function is one that would be undefined behavior  //
 // to call.                                                                   //
 //                                                                            //
 // An additional optimization is performed when converting from one           //
@@ -58,12 +58,12 @@
 #include <cstring>
 #include <exception>
 #include <functional>
-#include <initializer_list>
 #include <memory>
 #include <new>
 #include <type_traits>
 #include <utility>
 
+#include "absl/base/attributes.h"
 #include "absl/base/config.h"
 #include "absl/base/internal/invoke.h"
 #include "absl/base/macros.h"
diff --git a/absl/functional/internal/front_binder.h b/absl/functional/internal/front_binder.h
index 45f52de..44a5492 100644
--- a/absl/functional/internal/front_binder.h
+++ b/absl/functional/internal/front_binder.h
@@ -34,8 +34,8 @@
 template <class R, class Tuple, size_t... Idx, class... Args>
 R Apply(Tuple&& bound, absl::index_sequence<Idx...>, Args&&... free) {
   return base_internal::invoke(
-      absl::forward<Tuple>(bound).template get<Idx>()...,
-      absl::forward<Args>(free)...);
+      std::forward<Tuple>(bound).template get<Idx>()...,
+      std::forward<Args>(free)...);
 }
 
 template <class F, class... BoundArgs>
@@ -48,13 +48,13 @@
  public:
   template <class... Ts>
   constexpr explicit FrontBinder(absl::in_place_t, Ts&&... ts)
-      : bound_args_(absl::forward<Ts>(ts)...) {}
+      : bound_args_(std::forward<Ts>(ts)...) {}
 
   template <class... FreeArgs, class R = base_internal::invoke_result_t<
                                    F&, BoundArgs&..., FreeArgs&&...>>
   R operator()(FreeArgs&&... free_args) & {
     return functional_internal::Apply<R>(bound_args_, Idx(),
-                                         absl::forward<FreeArgs>(free_args)...);
+                                         std::forward<FreeArgs>(free_args)...);
   }
 
   template <class... FreeArgs,
@@ -62,7 +62,7 @@
                 const F&, const BoundArgs&..., FreeArgs&&...>>
   R operator()(FreeArgs&&... free_args) const& {
     return functional_internal::Apply<R>(bound_args_, Idx(),
-                                         absl::forward<FreeArgs>(free_args)...);
+                                         std::forward<FreeArgs>(free_args)...);
   }
 
   template <class... FreeArgs, class R = base_internal::invoke_result_t<
@@ -70,8 +70,8 @@
   R operator()(FreeArgs&&... free_args) && {
     // This overload is called when *this is an rvalue. If some of the bound
     // arguments are stored by value or rvalue reference, we move them.
-    return functional_internal::Apply<R>(absl::move(bound_args_), Idx(),
-                                         absl::forward<FreeArgs>(free_args)...);
+    return functional_internal::Apply<R>(std::move(bound_args_), Idx(),
+                                         std::forward<FreeArgs>(free_args)...);
   }
 
   template <class... FreeArgs,
@@ -80,8 +80,8 @@
   R operator()(FreeArgs&&... free_args) const&& {
     // This overload is called when *this is an rvalue. If some of the bound
     // arguments are stored by value or rvalue reference, we move them.
-    return functional_internal::Apply<R>(absl::move(bound_args_), Idx(),
-                                         absl::forward<FreeArgs>(free_args)...);
+    return functional_internal::Apply<R>(std::move(bound_args_), Idx(),
+                                         std::forward<FreeArgs>(free_args)...);
   }
 };
 
diff --git a/absl/functional/overload.h b/absl/functional/overload.h
index 4651f14..7e19e70 100644
--- a/absl/functional/overload.h
+++ b/absl/functional/overload.h
@@ -16,26 +16,26 @@
 // File: overload.h
 // -----------------------------------------------------------------------------
 //
-// `absl::Overload()` returns a functor that provides overloads based on the
-// functors passed to it.
+// `absl::Overload` is a functor that provides overloads based on the functors
+// with which it is created. This can, for example, be used to locally define an
+// anonymous visitor type for `std::visit` inside a function using lambdas.
+//
 // Before using this function, consider whether named function overloads would
 // be a better design.
-// One use case for this is locally defining visitors for `std::visit` inside a
-// function using lambdas.
-
-// Example: Using  `absl::Overload` to define a visitor for `std::variant`.
 //
-// std::variant<int, std::string, double> v(int{1});
+// Note: absl::Overload requires C++17.
 //
-// assert(std::visit(absl::Overload(
-//                        [](int) -> absl::string_view { return "int"; },
-//                        [](const std::string&) -> absl::string_view {
-//                          return "string";
-//                        },
-//                        [](double) -> absl::string_view { return "double"; }),
-//                     v) == "int");
+// Example:
 //
-// Note: This requires C++17.
+//     std::variant<std::string, int32_t, int64_t> v(int32_t{1});
+//     const size_t result =
+//         std::visit(absl::Overload{
+//                        [](const std::string& s) { return s.size(); },
+//                        [](const auto& s) { return sizeof(s); },
+//                    },
+//                    v);
+//     assert(result == 4);
+//
 
 #ifndef ABSL_FUNCTIONAL_OVERLOAD_H_
 #define ABSL_FUNCTIONAL_OVERLOAD_H_
@@ -49,14 +49,30 @@
 #if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
     ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
 
-template <int&... ExplicitArgumentBarrier, typename... T>
-auto Overload(T&&... ts) {
-  struct OverloadImpl : absl::remove_cvref_t<T>... {
-    using absl::remove_cvref_t<T>::operator()...;
-  };
-  return OverloadImpl{std::forward<T>(ts)...};
-}
+template <typename... T>
+struct Overload final : T... {
+  using T::operator()...;
+
+  // For historical reasons we want to support use that looks like a function
+  // call:
+  //
+  //     absl::Overload(lambda_1, lambda_2)
+  //
+  // This works automatically in C++20 because we have support for parenthesized
+  // aggregate initialization. Before then we must provide a constructor that
+  // makes this work.
+  //
+  constexpr explicit Overload(T... ts) : T(std::move(ts))... {}
+};
+
+// Before C++20, which added support for CTAD for aggregate types, we must also
+// teach the compiler how to deduce the template arguments for Overload.
+//
+template <typename... T>
+Overload(T...) -> Overload<T...>;
+
 #else
+
 namespace functional_internal {
 template <typename T>
 constexpr bool kDependentFalse = false;
@@ -69,6 +85,7 @@
 }
 
 #endif
+
 ABSL_NAMESPACE_END
 }  // namespace absl
 
diff --git a/absl/functional/overload_test.cc b/absl/functional/overload_test.cc
index 739c4c4..fa49d29 100644
--- a/absl/functional/overload_test.cc
+++ b/absl/functional/overload_test.cc
@@ -30,15 +30,14 @@
 
 namespace {
 
-TEST(OverloadTest, DispatchConsidersType) {
-  auto overloaded = absl::Overload(
-      [](int v) -> std::string { return absl::StrCat("int ", v); },        //
-      [](double v) -> std::string { return absl::StrCat("double ", v); },  //
-      [](const char* v) -> std::string {                                   //
-        return absl::StrCat("const char* ", v);                            //
-      },                                                                   //
-      [](auto v) -> std::string { return absl::StrCat("auto ", v); }       //
-  );
+TEST(OverloadTest, DispatchConsidersTypeWithAutoFallback) {
+  auto overloaded = absl::Overload{
+      [](int v) { return absl::StrCat("int ", v); },
+      [](double v) { return absl::StrCat("double ", v); },
+      [](const char* v) { return absl::StrCat("const char* ", v); },
+      [](auto v) { return absl::StrCat("auto ", v); },
+  };
+
   EXPECT_EQ("int 1", overloaded(1));
   EXPECT_EQ("double 2.5", overloaded(2.5));
   EXPECT_EQ("const char* hello", overloaded("hello"));
@@ -46,32 +45,34 @@
 }
 
 TEST(OverloadTest, DispatchConsidersNumberOfArguments) {
-  auto overloaded = absl::Overload(                 //
-      [](int a) { return a + 1; },                  //
-      [](int a, int b) { return a * b; },           //
-      []() -> absl::string_view { return "none"; }  //
-  );
+  auto overloaded = absl::Overload{
+      [](int a) { return a + 1; },
+      [](int a, int b) { return a * b; },
+      []() -> absl::string_view { return "none"; },
+  };
+
   EXPECT_EQ(3, overloaded(2));
   EXPECT_EQ(21, overloaded(3, 7));
   EXPECT_EQ("none", overloaded());
 }
 
 TEST(OverloadTest, SupportsConstantEvaluation) {
-  auto overloaded = absl::Overload(                 //
-      [](int a) { return a + 1; },                  //
-      [](int a, int b) { return a * b; },           //
-      []() -> absl::string_view { return "none"; }  //
-  );
+  auto overloaded = absl::Overload{
+      [](int a) { return a + 1; },
+      [](int a, int b) { return a * b; },
+      []() -> absl::string_view { return "none"; },
+  };
+
   static_assert(overloaded() == "none");
   static_assert(overloaded(2) == 3);
   static_assert(overloaded(3, 7) == 21);
 }
 
 TEST(OverloadTest, PropogatesDefaults) {
-  auto overloaded = absl::Overload(            //
-      [](int a, int b = 5) { return a * b; },  //
-      [](double c) { return c; }               //
-  );
+  auto overloaded = absl::Overload{
+      [](int a, int b = 5) { return a * b; },
+      [](double c) { return c; },
+  };
 
   EXPECT_EQ(21, overloaded(3, 7));
   EXPECT_EQ(35, overloaded(7));
@@ -79,45 +80,82 @@
 }
 
 TEST(OverloadTest, AmbiguousWithDefaultsNotInvocable) {
-  auto overloaded = absl::Overload(            //
-      [](int a, int b = 5) { return a * b; },  //
-      [](int c) { return c; }                  //
-  );
+  auto overloaded = absl::Overload{
+      [](int a, int b = 5) { return a * b; },
+      [](int c) { return c; },
+  };
+
   static_assert(!std::is_invocable_v<decltype(overloaded), int>);
   static_assert(std::is_invocable_v<decltype(overloaded), int, int>);
 }
 
 TEST(OverloadTest, AmbiguousDuplicatesNotInvocable) {
-  auto overloaded = absl::Overload(  //
-      [](int a) { return a; },       //
-      [](int c) { return c; }        //
-  );
+  auto overloaded = absl::Overload{
+      [](int a) { return a; },
+      [](int c) { return c; },
+  };
+
   static_assert(!std::is_invocable_v<decltype(overloaded), int>);
 }
 
 TEST(OverloadTest, AmbiguousConversionNotInvocable) {
-  auto overloaded = absl::Overload(  //
-      [](uint16_t a) { return a; },  //
-      [](uint64_t c) { return c; }   //
-  );
+  auto overloaded = absl::Overload{
+      [](uint16_t a) { return a; },
+      [](uint64_t c) { return c; },
+  };
+
   static_assert(!std::is_invocable_v<decltype(overloaded), int>);
 }
 
+TEST(OverloadTest, AmbiguousConversionWithAutoNotInvocable) {
+  auto overloaded = absl::Overload{
+      [](auto a) { return a; },
+      [](auto c) { return c; },
+  };
+
+  static_assert(!std::is_invocable_v<decltype(overloaded), int>);
+}
+
+#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L
+
+TEST(OverloadTest, AmbiguousConversionWithAutoAndTemplateNotInvocable) {
+  auto overloaded = absl::Overload{
+      [](auto a) { return a; },
+      []<class T>(T c) { return c; },
+  };
+
+  static_assert(!std::is_invocable_v<decltype(overloaded), int>);
+}
+
+TEST(OverloadTest, DispatchConsidersTypeWithTemplateFallback) {
+  auto overloaded = absl::Overload{
+      [](int a) { return a; },
+      []<class T>(T c) { return c * 2; },
+  };
+
+  EXPECT_EQ(7, overloaded(7));
+  EXPECT_EQ(14.0, overloaded(7.0));
+}
+
+#endif  // ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L
+
 TEST(OverloadTest, DispatchConsidersSfinae) {
-  auto overloaded = absl::Overload(                    //
-      [](auto a) -> decltype(a + 1) { return a + 1; }  //
-  );
+  auto overloaded = absl::Overload{
+      [](auto a) -> decltype(a + 1) { return a + 1; },
+  };
+
   static_assert(std::is_invocable_v<decltype(overloaded), int>);
   static_assert(!std::is_invocable_v<decltype(overloaded), std::string>);
 }
 
 TEST(OverloadTest, VariantVisitDispatchesCorrectly) {
   absl::variant<int, double, std::string> v(1);
-  auto overloaded = absl::Overload(
-      [](int) -> absl::string_view { return "int"; },                   //
-      [](double) -> absl::string_view { return "double"; },             //
-      [](const std::string&) -> absl::string_view { return "string"; }  //
-  );
+  auto overloaded = absl::Overload{
+      [](int) -> absl::string_view { return "int"; },
+      [](double) -> absl::string_view { return "double"; },
+      [](const std::string&) -> absl::string_view { return "string"; },
+  };
+
   EXPECT_EQ("int", absl::visit(overloaded, v));
   v = 1.1;
   EXPECT_EQ("double", absl::visit(overloaded, v));
@@ -125,6 +163,51 @@
   EXPECT_EQ("string", absl::visit(overloaded, v));
 }
 
+TEST(OverloadTest, VariantVisitWithAutoFallbackDispatchesCorrectly) {
+  absl::variant<std::string, int32_t, int64_t> v(int32_t{1});
+  auto overloaded = absl::Overload{
+      [](const std::string& s) { return s.size(); },
+      [](const auto& s) { return sizeof(s); },
+  };
+
+  EXPECT_EQ(4, absl::visit(overloaded, v));
+  v = int64_t{1};
+  EXPECT_EQ(8, absl::visit(overloaded, v));
+  v = std::string("hello");
+  EXPECT_EQ(5, absl::visit(overloaded, v));
+}
+
+// This API used to be exported as a function, so it should also work fine to
+// use parentheses when initializing it.
+TEST(OverloadTest, UseWithParentheses) {
+  const auto overloaded =
+      absl::Overload([](const std::string& s) { return s.size(); },
+                     [](const auto& s) { return sizeof(s); });
+
+  absl::variant<std::string, int32_t, int64_t> v(int32_t{1});
+  EXPECT_EQ(4, absl::visit(overloaded, v));
+
+  v = int64_t{1};
+  EXPECT_EQ(8, absl::visit(overloaded, v));
+
+  v = std::string("hello");
+  EXPECT_EQ(5, absl::visit(overloaded, v));
+}
+
+TEST(OverloadTest, HasConstexprConstructor) {
+  constexpr auto overloaded = absl::Overload{
+      [](int v) { return absl::StrCat("int ", v); },
+      [](double v) { return absl::StrCat("double ", v); },
+      [](const char* v) { return absl::StrCat("const char* ", v); },
+      [](auto v) { return absl::StrCat("auto ", v); },
+  };
+
+  EXPECT_EQ("int 1", overloaded(1));
+  EXPECT_EQ("double 2.5", overloaded(2.5));
+  EXPECT_EQ("const char* hello", overloaded("hello"));
+  EXPECT_EQ("auto 1.5", overloaded(1.5f));
+}
+
 }  // namespace
 
 #endif
diff --git a/absl/hash/BUILD.bazel b/absl/hash/BUILD.bazel
index 1e8ad45..fe567e9 100644
--- a/absl/hash/BUILD.bazel
+++ b/absl/hash/BUILD.bazel
@@ -61,7 +61,7 @@
 
 cc_library(
     name = "hash_testing",
-    testonly = 1,
+    testonly = True,
     hdrs = ["hash_testing.h"],
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
@@ -128,7 +128,7 @@
 
 cc_binary(
     name = "hash_benchmark",
-    testonly = 1,
+    testonly = True,
     srcs = ["hash_benchmark.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -148,7 +148,7 @@
 
 cc_library(
     name = "spy_hash_state",
-    testonly = 1,
+    testonly = True,
     hdrs = ["internal/spy_hash_state.h"],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
diff --git a/absl/hash/hash_benchmark.cc b/absl/hash/hash_benchmark.cc
index d18ea69..9b73f46 100644
--- a/absl/hash/hash_benchmark.cc
+++ b/absl/hash/hash_benchmark.cc
@@ -160,7 +160,7 @@
     return hash<decltype(__VA_ARGS__)>{}(arg);                   \
   }                                                              \
   bool absl_hash_test_odr_use##hash##name =                      \
-      (benchmark::DoNotOptimize(&Codegen##hash##name), false);
+      (benchmark::DoNotOptimize(&Codegen##hash##name), false)
 
 MAKE_BENCHMARK(AbslHash, Int32, int32_t{});
 MAKE_BENCHMARK(AbslHash, Int64, int64_t{});
@@ -315,9 +315,9 @@
   BENCHMARK(BM_latency_##hash##_##name);                     \
   }  // namespace
 
-MAKE_LATENCY_BENCHMARK(AbslHash, Int32, PodRand<int32_t>);
-MAKE_LATENCY_BENCHMARK(AbslHash, Int64, PodRand<int64_t>);
-MAKE_LATENCY_BENCHMARK(AbslHash, String9, StringRand<9>);
-MAKE_LATENCY_BENCHMARK(AbslHash, String33, StringRand<33>);
-MAKE_LATENCY_BENCHMARK(AbslHash, String65, StringRand<65>);
-MAKE_LATENCY_BENCHMARK(AbslHash, String257, StringRand<257>);
+MAKE_LATENCY_BENCHMARK(AbslHash, Int32, PodRand<int32_t>)
+MAKE_LATENCY_BENCHMARK(AbslHash, Int64, PodRand<int64_t>)
+MAKE_LATENCY_BENCHMARK(AbslHash, String9, StringRand<9>)
+MAKE_LATENCY_BENCHMARK(AbslHash, String33, StringRand<33>)
+MAKE_LATENCY_BENCHMARK(AbslHash, String65, StringRand<65>)
+MAKE_LATENCY_BENCHMARK(AbslHash, String257, StringRand<257>)
diff --git a/absl/hash/hash_test.cc b/absl/hash/hash_test.cc
index 59fe8de..7fecf53 100644
--- a/absl/hash/hash_test.cc
+++ b/absl/hash/hash_test.cc
@@ -48,7 +48,7 @@
 #include "absl/types/optional.h"
 #include "absl/types/variant.h"
 
-#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+#ifdef ABSL_INTERNAL_STD_FILESYSTEM_PATH_HASH_AVAILABLE
 #include <filesystem>  // NOLINT
 #endif
 
diff --git a/absl/hash/internal/hash.cc b/absl/hash/internal/hash.cc
index 11451e5..93906ef 100644
--- a/absl/hash/internal/hash.cc
+++ b/absl/hash/internal/hash.cc
@@ -61,7 +61,7 @@
 
 uint64_t MixingHashState::LowLevelHashImpl(const unsigned char* data,
                                            size_t len) {
-  return LowLevelHash(data, len, Seed(), kHashSalt);
+  return LowLevelHashLenGt16(data, len, Seed(), kHashSalt);
 }
 
 }  // namespace hash_internal
diff --git a/absl/hash/internal/hash.h b/absl/hash/internal/hash.h
index f4a94f9..03bf183 100644
--- a/absl/hash/internal/hash.h
+++ b/absl/hash/internal/hash.h
@@ -24,6 +24,15 @@
 #include <TargetConditionals.h>
 #endif
 
+#include "absl/base/config.h"
+
+// For feature testing and determining which headers can be included.
+#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L
+#include <version>
+#else
+#include <ciso646>
+#endif
+
 #include <algorithm>
 #include <array>
 #include <bitset>
@@ -47,7 +56,6 @@
 #include <utility>
 #include <vector>
 
-#include "absl/base/config.h"
 #include "absl/base/internal/unaligned_access.h"
 #include "absl/base/port.h"
 #include "absl/container/fixed_array.h"
@@ -61,7 +69,7 @@
 #include "absl/types/variant.h"
 #include "absl/utility/utility.h"
 
-#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L && \
+#if defined(__cpp_lib_filesystem) && __cpp_lib_filesystem >= 201703L && \
     !defined(_LIBCPP_HAS_NO_FILESYSTEM_LIBRARY)
 #include <filesystem>  // NOLINT
 #endif
@@ -591,7 +599,9 @@
 #if defined(__cpp_lib_filesystem) && __cpp_lib_filesystem >= 201703L && \
     !defined(_LIBCPP_HAS_NO_FILESYSTEM_LIBRARY) && \
     (!defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) ||        \
-     __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ >= 130000)
+     __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ >= 130000) &&       \
+    (!defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) ||         \
+     __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 101500)
 
 #define ABSL_INTERNAL_STD_FILESYSTEM_PATH_HASH_AVAILABLE 1
 
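With the added macOS deployment-target guard, ABSL_INTERNAL_STD_FILESYSTEM_PATH_HASH_AVAILABLE is the single switch callers key on (the hash_test.cc hunk earlier now checks it instead of the language version). A minimal sketch of code gated the same way, assuming the std::filesystem::path hash support this macro advertises:

#include <cstddef>

#include "absl/hash/hash.h"

#ifdef ABSL_INTERNAL_STD_FILESYSTEM_PATH_HASH_AVAILABLE
#include <filesystem>  // NOLINT

// Only compiled where <filesystem> and absl::Hash's path support are usable.
std::size_t HashPath(const std::filesystem::path& p) {
  return absl::Hash<std::filesystem::path>{}(p);
}
#endif
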
diff --git a/absl/hash/internal/low_level_hash.cc b/absl/hash/internal/low_level_hash.cc
index b5db0b8..6dc71cf 100644
--- a/absl/hash/internal/low_level_hash.cc
+++ b/absl/hash/internal/low_level_hash.cc
@@ -14,6 +14,9 @@
 
 #include "absl/hash/internal/low_level_hash.h"
 
+#include <cstddef>
+#include <cstdint>
+
 #include "absl/base/internal/unaligned_access.h"
 #include "absl/base/prefetch.h"
 #include "absl/numeric/int128.h"
@@ -28,19 +31,22 @@
   return absl::Uint128Low64(p) ^ absl::Uint128High64(p);
 }
 
-uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
-                      const uint64_t salt[5]) {
+uint64_t LowLevelHashLenGt16(const void* data, size_t len, uint64_t seed,
+                             const uint64_t salt[5]) {
   // Prefetch the cacheline that data resides in.
   PrefetchToLocalCache(data);
   const uint8_t* ptr = static_cast<const uint8_t*>(data);
   uint64_t starting_length = static_cast<uint64_t>(len);
+  const uint8_t* last_16_ptr = ptr + starting_length - 16;
   uint64_t current_state = seed ^ salt[0];
 
   if (len > 64) {
     // If we have more than 64 bytes, we're going to handle chunks of 64
     // bytes at a time. We're going to build up two separate hash states
     // which we will then hash together.
-    uint64_t duplicated_state = current_state;
+    uint64_t duplicated_state0 = current_state;
+    uint64_t duplicated_state1 = current_state;
+    uint64_t duplicated_state2 = current_state;
 
     do {
       // Always prefetch the next cacheline.
@@ -55,40 +61,72 @@
       uint64_t g = absl::base_internal::UnalignedLoad64(ptr + 48);
       uint64_t h = absl::base_internal::UnalignedLoad64(ptr + 56);
 
-      uint64_t cs0 = Mix(a ^ salt[1], b ^ current_state);
-      uint64_t cs1 = Mix(c ^ salt[2], d ^ current_state);
-      current_state = (cs0 ^ cs1);
+      current_state = Mix(a ^ salt[1], b ^ current_state);
+      duplicated_state0 = Mix(c ^ salt[2], d ^ duplicated_state0);
 
-      uint64_t ds0 = Mix(e ^ salt[3], f ^ duplicated_state);
-      uint64_t ds1 = Mix(g ^ salt[4], h ^ duplicated_state);
-      duplicated_state = (ds0 ^ ds1);
+      duplicated_state1 = Mix(e ^ salt[3], f ^ duplicated_state1);
+      duplicated_state2 = Mix(g ^ salt[4], h ^ duplicated_state2);
 
       ptr += 64;
       len -= 64;
     } while (len > 64);
 
-    current_state = current_state ^ duplicated_state;
+    current_state = (current_state ^ duplicated_state0) ^
+                    (duplicated_state1 + duplicated_state2);
   }
 
   // We now have a data `ptr` with at most 64 bytes and the current state
   // of the hashing state machine stored in current_state.
-  while (len > 16) {
+  if (len > 32) {
+    uint64_t a = absl::base_internal::UnalignedLoad64(ptr);
+    uint64_t b = absl::base_internal::UnalignedLoad64(ptr + 8);
+    uint64_t c = absl::base_internal::UnalignedLoad64(ptr + 16);
+    uint64_t d = absl::base_internal::UnalignedLoad64(ptr + 24);
+
+    uint64_t cs0 = Mix(a ^ salt[1], b ^ current_state);
+    uint64_t cs1 = Mix(c ^ salt[2], d ^ current_state);
+    current_state = cs0 ^ cs1;
+
+    ptr += 32;
+    len -= 32;
+  }
+
+  // We now have a data `ptr` with at most 32 bytes and the current state
+  // of the hashing state machine stored in current_state.
+  if (len > 16) {
     uint64_t a = absl::base_internal::UnalignedLoad64(ptr);
     uint64_t b = absl::base_internal::UnalignedLoad64(ptr + 8);
 
     current_state = Mix(a ^ salt[1], b ^ current_state);
-
-    ptr += 16;
-    len -= 16;
   }
 
-  // We now have a data `ptr` with at most 16 bytes.
+  // We now have a data `ptr` with at least 1 and at most 16 bytes. But we can
+  // safely read from `ptr + len - 16`.
+  uint64_t a = absl::base_internal::UnalignedLoad64(last_16_ptr);
+  uint64_t b = absl::base_internal::UnalignedLoad64(last_16_ptr + 8);
+
+  return Mix(a ^ salt[1] ^ starting_length, b ^ current_state);
+}
+
+uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
+                      const uint64_t salt[5]) {
+  if (len > 16) return LowLevelHashLenGt16(data, len, seed, salt);
+
+  // Prefetch the cacheline that data resides in.
+  PrefetchToLocalCache(data);
+  const uint8_t* ptr = static_cast<const uint8_t*>(data);
+  uint64_t starting_length = static_cast<uint64_t>(len);
+  uint64_t current_state = seed ^ salt[0];
+  if (len == 0) return current_state;
+
   uint64_t a = 0;
   uint64_t b = 0;
+
+  // We now have a data `ptr` with at least 1 and at most 16 bytes.
   if (len > 8) {
     // When we have at least 9 and at most 16 bytes, set A to the first 64
-    // bits of the input and B to the last 64 bits of the input. Yes, they will
-    // overlap in the middle if we are working with less than the full 16
+    // bits of the input and B to the last 64 bits of the input. Yes, they
+    // will overlap in the middle if we are working with less than the full 16
     // bytes.
     a = absl::base_internal::UnalignedLoad64(ptr);
     b = absl::base_internal::UnalignedLoad64(ptr + len - 8);
@@ -97,20 +135,14 @@
     // bits and B to the last 32 bits.
     a = absl::base_internal::UnalignedLoad32(ptr);
     b = absl::base_internal::UnalignedLoad32(ptr + len - 4);
-  } else if (len > 0) {
-    // If we have at least 1 and at most 3 bytes, read all of the provided
-    // bits into A, with some adjustments.
-    a = static_cast<uint64_t>((ptr[0] << 16) | (ptr[len >> 1] << 8) |
-                              ptr[len - 1]);
-    b = 0;
   } else {
-    a = 0;
-    b = 0;
+    // If we have at least 1 and at most 3 bytes, read 2 bytes into A and the
+    // other byte into B, with some adjustments.
+    a = static_cast<uint64_t>((ptr[0] << 8) | ptr[len - 1]);
+    b = static_cast<uint64_t>(ptr[len >> 1]);
   }
 
-  uint64_t w = Mix(a ^ salt[1], b ^ current_state);
-  uint64_t z = salt[1] ^ starting_length;
-  return Mix(w, z);
+  return Mix(a ^ salt[1] ^ starting_length, b ^ current_state);
 }
 
 }  // namespace hash_internal
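
Two things change in this file: lengths above 16 now go through LowLevelHashLenGt16, which finishes by reading the last 16 bytes at an overlapping offset instead of looping, and the short-input path packs 1-3 byte inputs as two bytes into `a` plus the middle byte into `b`. A worked illustration of just that packing step (not the full hash; seed, salt, and Mix are omitted):

#include <cstddef>
#include <cstdint>

// Mirrors the new 1-3 byte branch: a = first and last byte, b = middle byte.
// For len == 1 or 2 the "middle" index (len >> 1) simply reuses a byte that
// was already read, so no branch on the exact length is needed.
void PackShortInput(const uint8_t* ptr, size_t len, uint64_t& a, uint64_t& b) {
  // Precondition: 1 <= len <= 3.
  a = static_cast<uint64_t>((ptr[0] << 8) | ptr[len - 1]);
  b = static_cast<uint64_t>(ptr[len >> 1]);
}
// Example: bytes {0x01, 0x02, 0x03} -> a == 0x0103, b == 0x02.
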
diff --git a/absl/hash/internal/low_level_hash.h b/absl/hash/internal/low_level_hash.h
index 439968a..d460e35 100644
--- a/absl/hash/internal/low_level_hash.h
+++ b/absl/hash/internal/low_level_hash.h
@@ -43,6 +43,10 @@
 uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
                       const uint64_t salt[5]);
 
+// Same as above except the length must be greater than 16.
+uint64_t LowLevelHashLenGt16(const void* data, size_t len, uint64_t seed,
+                             const uint64_t salt[5]);
+
 }  // namespace hash_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/absl/hash/internal/low_level_hash_test.cc b/absl/hash/internal/low_level_hash_test.cc
index 589a3d8..ea781dd 100644
--- a/absl/hash/internal/low_level_hash_test.cc
+++ b/absl/hash/internal/low_level_hash_test.cc
@@ -406,99 +406,99 @@
 
 #if defined(ABSL_IS_BIG_ENDIAN)
   constexpr uint64_t kGolden[kNumGoldenOutputs] = {
-      0xe5a40d39ab796423, 0x1766974bf7527d81, 0x5c3bbbe230db17a8,
-      0xa6630143a7e6aa6f, 0x17645cb7318b86b,  0x218b175f30ba61f8,
-      0xa6564b468248c683, 0xef192f401b116e1c, 0xbe8dc0c54617639d,
-      0xe7b01610fc22dbb8, 0x99d9f694404af913, 0xf4eecd37464b45c5,
-      0x7d2c653d63596d9b, 0x3f15c8544ec5393a, 0x6b9dc0c1704f796c,
-      0xf1ded7a7eae5ed5a, 0x2db2fd7c6dd4641b, 0x151ca2d3d4cd33ab,
-      0xa5af5994ac2ccd64, 0x2b2a4ca3191d2fce, 0xf89e68c9364e7c05,
-      0x71724c70b799c21,  0x70536fabfd157369, 0xdee92794c3c3082b,
-      0xac033a6743d3b3eb, 0xed2956b506cd5151, 0xbd669644755264b6,
-      0x6ab1ff5d5f549a63, 0xf6bd551a2e3e04e,  0x7b5a8cef6875ea73,
-      0x22bccf4d4db0a91c, 0x4f2bc07754c7c7eb, 0xfb6b8342a86725db,
-      0x13a1a0d4c5854da,  0x5f6e44655f7dedac, 0x54a9198dff2bdf85,
-      0xdb17e6915d4e4042, 0xa69926cf5c3b89f,  0xf77f031bfd74c096,
-      0x1d6f916fdd50ec3c, 0x334ac76013ade393, 0x99370f899111de15,
-      0x352457a03ada6de,  0x341974d4f42d854d, 0xda89ab02872aeb5,
-      0x6ec2b74e143b10d9, 0x6f284c0b5cd60522, 0xf9670de353438f88,
-      0xde920913adf0a2b4, 0xb7a07d7c0c17a8ec, 0x879a69f558ba3a98,
-      0x360cf6d802df20f9, 0x53530f8046673738, 0xbd8f5f2bcf35e483,
-      0x3f171f047144b983, 0x644d04e820823465, 0x50e44773a20b2702,
-      0xe584ed4c05c745dd, 0x9a825c85b95ab6c0, 0xbce2931deb74e775,
-      0x10468e9e705c7cfe, 0x12e01de3104141e2, 0x5c11ae2ee3713abd,
-      0x6ac5ffb0860319e6, 0xc1e6da1849d30fc9, 0xa0e4d247a458b447,
-      0x4530d4615c32b89b, 0x116aa09107a76505, 0xf941339d00d9bb73,
-      0x573a0fc1615afb33, 0xa975c81dc868b258, 0x3ab2c5250ab54bda,
-      0x37f99f208a3e3b11, 0x4b49b0ff706689d,  0x30bafa0b8f0a87fe,
-      0xea6787a65cc20cdd, 0x55861729f1fc3ab8, 0xea38e009c5be9b72,
-      0xcb8522cba33c3c66, 0x352e77653fe306f3, 0xe0bb760793bac064,
-      0xf66ec59322662956, 0x637aa320455d56f8, 0x46ee546be5824a89,
-      0x9e6842421e83d8a4, 0xf98ac2bc96b9fb8c, 0xf2c1002fd9a70b99,
-      0x4c2b62b1e39e9405, 0x3248555fa3ade9c4, 0xd4d04c37f6417c21,
-      0xf40cd506b1bf5653, 0x6c45d6005c760d2f, 0x61d88a7e61ff0d7e,
-      0x131591e8a53cc967, 0xdae85cb9bc29bab6, 0xe98835334905e626,
-      0x7cce50a2b66b8754, 0x5b0b3d0c5ac498ae, 0xd35a218c974d1756,
-      0xfce436ddc1d003c,  0xd183901de90bb741, 0x9378f8f34974a66,
-      0x21f11ae0a0402368, 0xf2fbd7c94ef89cb6, 0xc329c69d0f0d080b,
-      0xf2841cba16216a61, 0x47aba97b44916df1, 0x724d4e00a8019fcf,
-      0x2df9005c2a728d63, 0xc788892a1a5d7515, 0x9e993a65f9df0480,
-      0x76876721ff49f969, 0xbe7a796cfba15bf5, 0xa4c8bd54586f5488,
-      0xb390a325275501ab, 0x893f11317427ccf1, 0x92f2bb57da5695b9,
-      0x30985b90da88269f, 0x2c690e268e086de8, 0x1c02df6097997196,
-      0x1f9778f8bbdf6455, 0x7d57378c7bf8416d, 0xba8582a5f8d84d38,
-      0xe8ca43b85050be4e, 0x5048cf6bed8a5d9f, 0xfbc5ba80917d0ea4,
-      0x8011026525bf1691, 0x26b8dc6aed9fb50d, 0x191f5bfee77c1fe3,
-      0xdd497891465a2cc1, 0x6f1fe8c57a33072e, 0x2c9f4ec078c460c0,
-      0x9a725bde8f6a1437, 0x6ce545fa3ef61e4d,
+      0x4c34aacf38f6eee4, 0x88b1366815e50b88, 0x1a36bd0c6150fb9c,
+      0xa783aba8a67366c7, 0x5e4a92123ae874f2, 0x0cc9ecf27067ee9a,
+      0xbe77aa94940527f9, 0x7ea5c12f2669fe31, 0xa33eed8737d946b9,
+      0x310aec5b1340bb36, 0x354e400861c5d8ff, 0x15be98166adcf42f,
+      0xc51910b62a90ae51, 0x539d47fc7fdf6a1f, 0x3ebba9daa46eef93,
+      0xd96bcd3a9113c17f, 0xc78eaf6256ded15a, 0x98902ed321c2f0d9,
+      0x75a4ac96414b954a, 0x2cb90e00a39e307b, 0x46539574626c3637,
+      0x186ec89a2be3ff45, 0x972a3bf7531519d2, 0xa14df0d25922364b,
+      0xa351e19d22752109, 0x08bd311d8fed4f82, 0xea2b52ddc6af54f9,
+      0x5f20549941338336, 0xd43b07422dc2782e, 0x377c68e2acda4835,
+      0x1b31a0a663b1d7b3, 0x7388ba5d68058a1a, 0xe382794ea816f032,
+      0xd4c3fe7889276ee0, 0x2833030545582ea9, 0x554d32a55e55df32,
+      0x8d6d33d7e17b424d, 0xe51a193d03ae1e34, 0xabb6a80835bd66b3,
+      0x0e4ba5293f9ce9b7, 0x1ebd8642cb762cdf, 0xcb54b555850888ee,
+      0x1e4195e4717c701f, 0x6235a13937f6532a, 0xd460960741e845c0,
+      0x2a72168a2d6af7b1, 0x6be38fbbfc5b17de, 0x4ee97cffa0d0fb39,
+      0xfdf1119ad5e71a55, 0x0dff7f66b3070727, 0x812d791d6ed62744,
+      0x60962919074b70b8, 0x956fa5c7d6872547, 0xee892daa58aae597,
+      0xeeda546e998ee369, 0x454481f5eb9b1fa8, 0x1054394634c98b1b,
+      0x55bb425415f591fb, 0x9601fa97416232c4, 0xd7a18506519daad7,
+      0x90935cb5de039acf, 0xe64054c5146ed359, 0xe5b323fb1e866c09,
+      0x10a472555f5ba1bc, 0xe3c0cd57d26e0972, 0x7ca3db7c121da3e8,
+      0x7004a89c800bb466, 0x865f69c1a1ff7f39, 0xbe0edd48f0cf2b99,
+      0x10e5e4ba3cc400f5, 0xafc2b91a220eef50, 0x6f04a259289b24f1,
+      0x2179a8070e880ef0, 0xd6a9a3d023a740c2, 0x96e6d7954755d9b8,
+      0xc8e4bddecce5af9f, 0x93941f0fbc724c92, 0xbef5fb15bf76a479,
+      0x534dca8f5da86529, 0x70789790feec116b, 0x2a296e167eea1fe9,
+      0x54cb1efd2a3ec7ea, 0x357b43897dfeb9f7, 0xd1eda89bc7ff89d3,
+      0x434f2e10cbb83c98, 0xeec4cdac46ca69ce, 0xd46aafd52a303206,
+      0x4bf05968ff50a5c9, 0x71c533747a6292df, 0xa40bd0d16a36118c,
+      0x597b4ee310c395ab, 0xc5b3e3e386172583, 0x12ca0b32284e6c70,
+      0xb48995fadcf35630, 0x0646368454cd217d, 0xa21c168e40d765b5,
+      0x4260d3811337da30, 0xb72728a01cff78e4, 0x8586920947f4756f,
+      0xc21e5f853cae7dc1, 0xf08c9533be9de285, 0x72df06653b4256d6,
+      0xf7b7f937f8db1779, 0x976db27dd0418127, 0x9ce863b7bc3f9e00,
+      0xebb679854fcf3a0a, 0x2ccebabbcf1afa99, 0x44201d6be451dac5,
+      0xb4af71c0e9a537d1, 0xad8fe9bb33ed2681, 0xcb30128bb68df43b,
+      0x154d8328903e8d07, 0x5844276dabeabdff, 0xd99017d7d36d930b,
+      0xabb0b4774fb261ca, 0x0a43f075d62e67e0, 0x8df7b371355ada6b,
+      0xf4c7a40d06513dcf, 0x257a3615955a0372, 0x987ac410bba74c06,
+      0xa011a46f25a632a2, 0xa14384b963ddd995, 0xf51b6b8cf9d50ba7,
+      0x3acdb91ee3abf18d, 0x34e799be08920e8c, 0x8766748a31304b36,
+      0x0aa239d5d0092f2e, 0xadf473ed26628594, 0xc4094b798eb4b79b,
+      0xe04ee5f33cd130f4, 0x85045d098c341d46, 0xf936cdf115a890ec,
+      0x51d137b6d8d2eb4f, 0xd10738bb2fccc1ef,
   };
 #else
   constexpr uint64_t kGolden[kNumGoldenOutputs] = {
-      0xe5a40d39ab796423, 0x1766974bf7527d81, 0x5c3bbbe230db17a8,
-      0xa6630143a7e6aa6f, 0x8787cb2d04b0c984, 0x33603654ff574ac2,
-      0xa6564b468248c683, 0xef192f401b116e1c, 0xbe8dc0c54617639d,
-      0x93d7f665b5521c8e, 0x646d70bb42445f28, 0x96a7b1e3cc9bd426,
-      0x76020289ab0790c4, 0x39f842e4133b9b44, 0x2b8d7047be4bcaab,
-      0x99628abef6716a97, 0x4432e02ba42b2740, 0x74d810efcad7918a,
-      0x88c84e986002507f, 0x4f99acf193cf39b9, 0xd90e7a3655891e37,
-      0x3bb378b1d4df8fcf, 0xf78e94045c052d47, 0x26da0b2130da6b40,
-      0x30b4d426af8c6986, 0x5413b4aaf3baaeae, 0x756ab265370a1597,
-      0xdaf5f4b7d09814fb, 0x8f874ae37742b75e, 0x8fecd03956121ce8,
-      0x229c292ea7a08285, 0x0bb4bf0692d14bae, 0x207b24ca3bdac1db,
-      0x64f6cd6745d3825b, 0xa2b2e1656b58df1e, 0x0d01d30d9ee7a148,
-      0x1cb4cd00ab804e3b, 0x4697f2637fd90999, 0x8383a756b5688c07,
-      0x695c29cb3696a975, 0xda2e5a5a5e971521, 0x7935d4befa056b2b,
-      0x38dd541ca95420fe, 0xcc06c7a4963f967f, 0xbf0f6f66e232fb20,
-      0xf7efb32d373fe71a, 0xe2e64634b1c12660, 0x285b8fd1638e306d,
-      0x658e8a4e3b714d6c, 0xf391fb968e0eb398, 0x744a9ea0cc144bf2,
-      0x12636f2be11012f1, 0x29c57de825948f80, 0x58c6f99ab0d1c021,
-      0x13e7b5a7b82fe3bb, 0x10fbc87901e02b63, 0xa24c9184901b748b,
-      0xcac4fd4c5080e581, 0xc38bdb7483ba68e1, 0xdb2a8069b2ceaffa,
-      0xdf9fe91d0d1c7887, 0xe83f49e96e2e6a08, 0x0c69e61b62ca2b62,
-      0xb4a4f3f85f8298fe, 0x167a1b39e1e95f41, 0xf8a2a5649855ee41,
-      0x27992565b595c498, 0x3e08cca5b71f9346, 0xad406b10c770a6d2,
-      0xd1713ce6e552bcf2, 0x753b287194c73ad3, 0x5ae41a95f600af1c,
-      0x4a61163b86a8bb4c, 0x42eeaa79e760c7e4, 0x698df622ef465b0a,
-      0x157583111e1a6026, 0xaa1388f078e793e0, 0xf10d68d0f3309360,
-      0x2af056184457a3de, 0x6d0058e1590b2489, 0x638f287f68817f12,
-      0xc46b71fecefd5467, 0x2c8e94679d964e0a, 0x8612b797ce22503a,
-      0x59f929babfba7170, 0x9527556923fb49a0, 0x1039ab644f5e150b,
-      0x7816c83f3aa05e6d, 0xf51d2f564518c619, 0x67d494cff03ac004,
-      0x2802d636ced1cfbb, 0xf64e20bad771cb12, 0x0b9a6cf84a83e15e,
-      0x8da6630319609301, 0x40946a86e2a996f3, 0xcab7f5997953fa76,
-      0x39129ca0e04fc465, 0x5238221fd685e1b8, 0x175130c407dbcaab,
-      0x02f20e7536c0b0df, 0x2742cb488a04ad56, 0xd6afb593879ff93b,
-      0xf50ad64caac0ca7f, 0x2ade95c4261364ae, 0x5c4f3299faacd07a,
-      0xfffe3bff0ae5e9bc, 0x1db785c0005166e4, 0xea000d962ad18418,
-      0xe42aef38359362d9, 0xc8e95657348a3891, 0xc162eca864f238c6,
-      0xbe1fb373e20579ad, 0x628a1d4f40aa6ffd, 0xa87bdb7456340f90,
-      0x5960ef3ba982c801, 0x5026586df9a431ec, 0xfe4b8a20fdf0840b,
-      0xdcb761867da7072f, 0xc10d4653667275b7, 0x727720deec13110b,
-      0x710b009662858dc9, 0xfbf8f7a3ecac1eb7, 0xb6fc4fcd0722e3df,
-      0x7cb86dcc55104aac, 0x19e71e9b45c3a51e, 0x51de38573c2bea48,
-      0xa73ab6996d6df158, 0x55ef2b8c930817b2, 0xb2850bf5fae87157,
-      0xecf3de1acd04651f, 0xcc0a40552559ff32, 0xc385c374f20315b1,
-      0xb90208a4c7234183, 0x58aa1ca7a4c075d9,
+      0x4c34aacf38f6eee4, 0x88b1366815e50b88, 0x1a36bd0c6150fb9c,
+      0xa783aba8a67366c7, 0xbc89ebdc622314e4, 0x632bc3cfcc7544d8,
+      0xbe77aa94940527f9, 0x7ea5c12f2669fe31, 0xa33eed8737d946b9,
+      0x74d832ea11fd18ab, 0x49c0487486246cdc, 0x3fdd986c87ddb0a0,
+      0xac3fa52a64d7c09a, 0xbff0e330196e7ed2, 0x8c8138d3ad7d3cce,
+      0x968c7d4b48e93778, 0xa04c78d3a421f529, 0x8854bc9c3c3c0241,
+      0xcccfcdf5a41113fe, 0xe6fc63dc543d984d, 0x00a39ff89e903c05,
+      0xaf7e9da25f9a26f9, 0x6e269a13d01a43df, 0x846d2300ce2ecdf8,
+      0xe7ea8c8f08478260, 0x9a2db0d62f6232f3, 0x6f66c761d168c59f,
+      0x55f9feacaae82043, 0x518084043700f614, 0xb0c8cfc11bead99f,
+      0xe4a68fdab6359d80, 0x97b17caa8f92236e, 0x96edf5e8363643dc,
+      0x9b3fbcd8d5b254cd, 0x22a263621d9b3a8b, 0xde90bf6f81800a6d,
+      0x1b51cae38c2e9513, 0x689215b3c414ef21, 0x064dc85afae8f557,
+      0xa2f3a8b51f408378, 0x6907c197ec1f6a3b, 0xfe83a42ef5c1cf13,
+      0x9b8b1d8f7a20cc13, 0x1f1681d52ca895d0, 0xd7b1670bf28e0f96,
+      0xb32f20f82d8b038a, 0x6a61d030fb2f5253, 0x8eb2bb0bc29ebb39,
+      0x144f36f7a9eef95c, 0xe77aa47d29808d8c, 0xf14d34c1fc568bad,
+      0x9796dcd4383f3c73, 0xa2f685fc1be7225b, 0xf3791295b16068b1,
+      0xb6b8f63424618948, 0x8ac4fd587045db19, 0x7e2aec2c34feb72e,
+      0x72e135a6910ccbb1, 0x661ff16f3c904e6f, 0xdf92cf9d67ca092d,
+      0x98a9953d79722eef, 0xe0649ed2181d1707, 0xcd8b8478636a297b,
+      0x9516258709c8471b, 0xc703b675b51f4394, 0xdb740eae020139f3,
+      0x57d1499ac4212ff2, 0x355cc03713d43825, 0x0e71ac9b8b1e101e,
+      0x8029fa72258ff559, 0xa2159726b4c16a50, 0x04e61582fba43007,
+      0xdab25af835be8cce, 0x13510b1b184705ee, 0xabdbc9e53666fdeb,
+      0x94a788fcb8173cef, 0x750d5e031286e722, 0x02559e72f4f5b497,
+      0x7d6e0e5996a646fa, 0x66e871b73b014132, 0x2ec170083f8b784f,
+      0x34ac9540cfce3fd9, 0x75c5622c6aad1295, 0xf799a6bb2651acc1,
+      0x8f6bcd3145bdc452, 0xddd9d326eb584a04, 0x5411af1e3532f8dc,
+      0xeb34722f2ad0f509, 0x835bc952a82298cc, 0xeb3839ff60ea92ad,
+      0x70bddf1bcdc8a4bc, 0x4bfb3ee86fcde525, 0xc7b3b93b81dfa386,
+      0xe66db544d57997e8, 0xf68a1b83fd363187, 0xe9b99bec615b171b,
+      0x093fba04d04ad28a, 0xba6117ed4231a303, 0x594bef25f9d4e206,
+      0x0a8cba60578b8f67, 0x88f6c7ca10b06019, 0x32a74082aef17b08,
+      0xe758222f971e22df, 0x4af14ff4a593e51e, 0xdba651e16cb09044,
+      0x3f3ac837d181eaac, 0xa5589a3f89610c01, 0xd409a7c3a18d5643,
+      0x8a89444f82962f26, 0x22eb62a13b9771b9, 0xd3a617615256ddd8,
+      0x7089b990c4bba297, 0x7d752893783eac4f, 0x1f2fcbb79372c915,
+      0x67a4446b17eb9839, 0x70d11df5cae46788, 0x52621e1780b47d0f,
+      0xcf63b93a6e590ee6, 0xb6bc96b58ee064b8, 0x2587f8d635ca9c75,
+      0xc6bddd62ec5e5d01, 0x957398ad3009cdb7, 0x05b6890b20bcd0d3,
+      0xbe6e965ff837222e, 0x47383a87d2b04b1a, 0x7d42207e6d8d7950,
+      0x7e981ed12a7f4aa3, 0xdebb05b30769441a, 0xaac5d86f4ff76c49,
+      0x384f195ca3248331, 0xec4c4b855e909ca1, 0x6a7eeb5a657d73d5,
+      0x9efbebe2fa9c2791, 0x19e7fa0546900c4d,
   };
 #endif
 
diff --git a/absl/hash/internal/spy_hash_state.h b/absl/hash/internal/spy_hash_state.h
index 0972826..357c301 100644
--- a/absl/hash/internal/spy_hash_state.h
+++ b/absl/hash/internal/spy_hash_state.h
@@ -149,20 +149,20 @@
                                              const unsigned char* begin,
                                              size_t size) {
     const size_t large_chunk_stride = PiecewiseChunkSize();
-    if (size > large_chunk_stride) {
-      // Combining a large contiguous buffer must have the same effect as
-      // doing it piecewise by the stride length, followed by the (possibly
-      // empty) remainder.
-      while (size >= large_chunk_stride) {
-        hash_state = SpyHashStateImpl::combine_contiguous(
-            std::move(hash_state), begin, large_chunk_stride);
-        begin += large_chunk_stride;
-        size -= large_chunk_stride;
-      }
+    // Combining a large contiguous buffer must have the same effect as
+    // doing it piecewise by the stride length, followed by the (possibly
+    // empty) remainder.
+    while (size > large_chunk_stride) {
+      hash_state = SpyHashStateImpl::combine_contiguous(
+          std::move(hash_state), begin, large_chunk_stride);
+      begin += large_chunk_stride;
+      size -= large_chunk_stride;
     }
 
-    hash_state.hash_representation_.emplace_back(
-        reinterpret_cast<const char*>(begin), size);
+    if (size > 0) {
+      hash_state.hash_representation_.emplace_back(
+          reinterpret_cast<const char*>(begin), size);
+    }
     return hash_state;
   }
 
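The SpyHashState change above stops recording an empty trailing piece: full chunks are peeled off while more than one stride remains, and the leftover (always non-empty for non-empty input) is recorded last. A standalone sketch of the resulting chunking, with PiecewiseChunkSize() stood in by a parameter:

#include <cstddef>
#include <string>
#include <vector>

// Splits [begin, begin + size) into stride-sized chunks followed by a
// non-empty remainder, matching the spy state's new recording behavior.
std::vector<std::string> SplitPiecewise(const char* begin, size_t size,
                                        size_t stride) {
  std::vector<std::string> pieces;
  while (size > stride) {
    pieces.emplace_back(begin, stride);
    begin += stride;
    size -= stride;
  }
  if (size > 0) pieces.emplace_back(begin, size);  // skip an empty remainder
  return pieces;
}
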
diff --git a/absl/log/BUILD.bazel b/absl/log/BUILD.bazel
index 40e87cc..b13cf4d 100644
--- a/absl/log/BUILD.bazel
+++ b/absl/log/BUILD.bazel
@@ -243,9 +243,6 @@
     hdrs = ["absl_vlog_is_on.h"],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    visibility = [
-        "//absl/log:__subpackages__",
-    ],
     deps = [
         "//absl/base:config",
         "//absl/base:core_headers",
@@ -259,9 +256,6 @@
     hdrs = ["vlog_is_on.h"],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    visibility = [
-        "//absl/log:__subpackages__",
-    ],
     deps = [
         ":absl_vlog_is_on",
     ],
@@ -295,6 +289,7 @@
 cc_test(
     name = "absl_check_test",
     size = "small",
+    timeout = "moderate",
     srcs = ["absl_check_test.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -328,6 +323,7 @@
 cc_test(
     name = "check_test",
     size = "small",
+    timeout = "moderate",
     srcs = ["check_test.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -361,6 +357,8 @@
         "//absl/base:core_headers",
         "//absl/log/internal:test_helpers",
         "//absl/status",
+        "//absl/strings",
+        "//absl/strings:string_view",
         "@com_google_googletest//:gtest",
     ],
 )
@@ -450,6 +448,7 @@
         "//absl/log:globals",
         "//absl/log:log_entry",
         "//absl/log:scoped_mock_log",
+        "//absl/log/internal:globals",
         "//absl/log/internal:test_actions",
         "//absl/log/internal:test_helpers",
         "//absl/log/internal:test_matchers",
@@ -667,6 +666,7 @@
         ":log_entry",
         ":log_sink",
         ":log_sink_registry",
+        ":vlog_is_on",
         "//absl/base:core_headers",
         "//absl/base:log_severity",
         "//absl/flags:flag",
diff --git a/absl/log/CMakeLists.txt b/absl/log/CMakeLists.txt
index a7d8b69..4384465 100644
--- a/absl/log/CMakeLists.txt
+++ b/absl/log/CMakeLists.txt
@@ -290,6 +290,7 @@
   LINKOPTS
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
+    absl::core_headers
     absl::log_internal_message
     absl::log_internal_nullstream
     absl::log_severity
@@ -788,6 +789,8 @@
     absl::core_headers
     absl::log_internal_test_helpers
     absl::status
+    absl::strings
+    absl::string_view
     GTest::gmock_main
 )
 
@@ -807,6 +810,7 @@
     absl::log_entry
     absl::log_globals
     absl::log_severity
+    absl::log_internal_globals
     absl::log_internal_test_actions
     absl::log_internal_test_helpers
     absl::log_internal_test_matchers
@@ -830,6 +834,8 @@
     absl::core_headers
     absl::log_internal_test_helpers
     absl::status
+    absl::strings
+    absl::string_view
     GTest::gmock_main
 )
 
diff --git a/absl/log/absl_log_basic_test.cc b/absl/log/absl_log_basic_test.cc
index 3a4b83c..7378f5a 100644
--- a/absl/log/absl_log_basic_test.cc
+++ b/absl/log/absl_log_basic_test.cc
@@ -16,6 +16,7 @@
 #include "absl/log/absl_log.h"
 
 #define ABSL_TEST_LOG ABSL_LOG
+#define ABSL_TEST_DLOG ABSL_DLOG
 
 #include "gtest/gtest.h"
 #include "absl/log/log_basic_test_impl.inc"
diff --git a/absl/log/absl_vlog_is_on.h b/absl/log/absl_vlog_is_on.h
index 29096b4..6bf6c41 100644
--- a/absl/log/absl_vlog_is_on.h
+++ b/absl/log/absl_vlog_is_on.h
@@ -46,12 +46,12 @@
 // Files which do not match any pattern in `--vmodule` use the value of `--v` as
 // their effective verbosity level.  The default is 0.
 //
-// SetVLOGLevel helper function is provided to do limited dynamic control over
+// SetVLogLevel helper function is provided to do limited dynamic control over
 // V-logging by appending to `--vmodule`. Because these go at the beginning of
 // the list, they take priority over any globs previously added.
 //
 // Resetting --vmodule will override all previous modifications to `--vmodule`,
-// including via SetVLOGLevel.
+// including via SetVLogLevel.
 
 #ifndef ABSL_LOG_ABSL_VLOG_IS_ON_H_
 #define ABSL_LOG_ABSL_VLOG_IS_ON_H_
@@ -77,7 +77,7 @@
 // Each ABSL_VLOG_IS_ON call site gets its own VLogSite that registers with the
 // global linked list of sites to asynchronously update its verbosity level on
 // changes to --v or --vmodule. The verbosity can also be set by manually
-// calling SetVLOGLevel.
+// calling SetVLogLevel.
 //
 // ABSL_VLOG_IS_ON is not async signal safe, but it is guaranteed not to
 // allocate new memory.
diff --git a/absl/log/check_test_impl.inc b/absl/log/check_test_impl.inc
index d5c0aee..6431810 100644
--- a/absl/log/check_test_impl.inc
+++ b/absl/log/check_test_impl.inc
@@ -31,6 +31,8 @@
 #include "absl/base/config.h"
 #include "absl/log/internal/test_helpers.h"
 #include "absl/status/status.h"
+#include "absl/strings/string_view.h"
+#include "absl/strings/substitute.h"
 
 // NOLINTBEGIN(misc-definitions-in-headers)
 
@@ -521,6 +523,162 @@
           "Check failed: v1 == v2 (ComparableType{1} vs. ComparableType{2})"));
 }
 
+// A type that can be printed using AbslStringify.
+struct StringifiableType {
+  int x = 0;
+  explicit StringifiableType(int x) : x(x) {}
+  friend bool operator==(const StringifiableType& lhs,
+                         const StringifiableType& rhs) {
+    return lhs.x == rhs.x;
+  }
+  friend bool operator!=(const StringifiableType& lhs,
+                         const StringifiableType& rhs) {
+    return lhs.x != rhs.x;
+  }
+  friend bool operator<(const StringifiableType& lhs,
+                        const StringifiableType& rhs) {
+    return lhs.x < rhs.x;
+  }
+  friend bool operator>(const StringifiableType& lhs,
+                        const StringifiableType& rhs) {
+    return lhs.x > rhs.x;
+  }
+  friend bool operator<=(const StringifiableType& lhs,
+                         const StringifiableType& rhs) {
+    return lhs.x <= rhs.x;
+  }
+  friend bool operator>=(const StringifiableType& lhs,
+                         const StringifiableType& rhs) {
+    return lhs.x >= rhs.x;
+  }
+  template <typename Sink>
+  friend void AbslStringify(Sink& sink, const StringifiableType& obj) {
+    absl::Format(&sink, "StringifiableType{%d}", obj.x);
+  }
+
+  // Make sure no unintended copy happens.
+  StringifiableType(const StringifiableType&) = delete;
+};
+
+TEST(CHECKTest, TestUserDefinedAbslStringify) {
+  const StringifiableType v1(1);
+  const StringifiableType v2(2);
+
+  ABSL_TEST_CHECK_EQ(v1, v1);
+  ABSL_TEST_CHECK_NE(v1, v2);
+  ABSL_TEST_CHECK_LT(v1, v2);
+  ABSL_TEST_CHECK_LE(v1, v2);
+  ABSL_TEST_CHECK_GT(v2, v1);
+  ABSL_TEST_CHECK_GE(v2, v1);
+}
+
+TEST(CHECKDeathTest, TestUserDefinedAbslStringify) {
+  const StringifiableType v1(1);
+  const StringifiableType v2(2);
+
+  // Returns a matcher for the expected check failure message when comparing two
+  // values.
+  auto expected_output = [](int lhs, absl::string_view condition, int rhs) {
+    return HasSubstr(
+        absl::Substitute("Check failed: v$0 $1 v$2 (StringifiableType{$0} vs. "
+                         "StringifiableType{$2})",
+                         lhs, condition, rhs));
+  };
+  // Test comparisons where the check fails.
+  EXPECT_DEATH(ABSL_TEST_CHECK_EQ(v1, v2), expected_output(1, "==", 2));
+  EXPECT_DEATH(ABSL_TEST_CHECK_NE(v1, v1), expected_output(1, "!=", 1));
+  EXPECT_DEATH(ABSL_TEST_CHECK_LT(v2, v1), expected_output(2, "<", 1));
+  EXPECT_DEATH(ABSL_TEST_CHECK_LE(v2, v1), expected_output(2, "<=", 1));
+  EXPECT_DEATH(ABSL_TEST_CHECK_GT(v1, v2), expected_output(1, ">", 2));
+  EXPECT_DEATH(ABSL_TEST_CHECK_GE(v1, v2), expected_output(1, ">=", 2));
+}
+
+// A type that can be printed using both AbslStringify and operator<<.
+struct StringifiableStreamableType {
+  int x = 0;
+  explicit StringifiableStreamableType(int x) : x(x) {}
+
+  friend bool operator==(const StringifiableStreamableType& lhs,
+                         const StringifiableStreamableType& rhs) {
+    return lhs.x == rhs.x;
+  }
+  friend bool operator!=(const StringifiableStreamableType& lhs,
+                         const StringifiableStreamableType& rhs) {
+    return lhs.x != rhs.x;
+  }
+  template <typename Sink>
+  friend void AbslStringify(Sink& sink,
+                            const StringifiableStreamableType& obj) {
+    absl::Format(&sink, "Stringified{%d}", obj.x);
+  }
+  friend std::ostream& operator<<(std::ostream& out,
+                                  const StringifiableStreamableType& obj) {
+    return out << "Streamed{" << obj.x << "}";
+  }
+
+  // Avoid unintentional copy.
+  StringifiableStreamableType(const StringifiableStreamableType&) = delete;
+};
+
+TEST(CHECKDeathTest, TestStreamingPreferredOverAbslStringify) {
+  StringifiableStreamableType v1(1);
+  StringifiableStreamableType v2(2);
+
+  EXPECT_DEATH(
+      ABSL_TEST_CHECK_EQ(v1, v2),
+      HasSubstr("Check failed: v1 == v2 (Streamed{1} vs. Streamed{2})"));
+}
+
+// A type whose pointer can be passed to AbslStringify.
+struct PointerIsStringifiable {};
+template <typename Sink>
+void AbslStringify(Sink& sink, const PointerIsStringifiable* var) {
+  sink.Append("PointerIsStringifiable");
+}
+
+// Verifies that a pointer is printed as a number despite having AbslStringify
+// defined. Users may implement AbslStringify that dereferences the pointer, and
+// doing so as part of DCHECK would not be good.
+TEST(CHECKDeathTest, TestPointerPrintedAsNumberDespiteAbslStringify) {
+  const auto* p = reinterpret_cast<const PointerIsStringifiable*>(0x1234);
+
+#ifdef _MSC_VER
+  EXPECT_DEATH(
+      ABSL_TEST_CHECK_EQ(p, nullptr),
+      HasSubstr("Check failed: p == nullptr (0000000000001234 vs. (null))"));
+#else   // _MSC_VER
+  EXPECT_DEATH(ABSL_TEST_CHECK_EQ(p, nullptr),
+               HasSubstr("Check failed: p == nullptr (0x1234 vs. (null))"));
+#endif  // _MSC_VER
+}
+
+// An uncopyable object with operator<<.
+struct Uncopyable {
+  int x;
+  explicit Uncopyable(int x) : x(x) {}
+  Uncopyable(const Uncopyable&) = delete;
+  friend bool operator==(const Uncopyable& lhs, const Uncopyable& rhs) {
+    return lhs.x == rhs.x;
+  }
+  friend bool operator!=(const Uncopyable& lhs, const Uncopyable& rhs) {
+    return lhs.x != rhs.x;
+  }
+  friend std::ostream& operator<<(std::ostream& os, const Uncopyable& obj) {
+    return os << "Uncopyable{" << obj.x << "}";
+  }
+};
+
+// Test that an uncopyable object can be used.
+// Catches the case where the implementation makes an unintended copy.
+TEST(CHECKDeathTest, TestUncopyable) {
+  const Uncopyable v1(1);
+  const Uncopyable v2(2);
+
+  EXPECT_DEATH(
+      ABSL_TEST_CHECK_EQ(v1, v2),
+      HasSubstr("Check failed: v1 == v2 (Uncopyable{1} vs. Uncopyable{2})"));
+}
+
 }  // namespace absl_log_internal
 
 // NOLINTEND(misc-definitions-in-headers)
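
The new tests above pin down the CHECK_* printing rules for user-defined types: AbslStringify is used when it is the only option, operator<< wins when both exist, a pointer is printed as a number even if AbslStringify is defined for the pointer, and no copies of the operands are made. A minimal usage sketch of the first rule (hypothetical Point type; assumes absl/log/check.h and absl/strings/str_format.h):

#include "absl/log/check.h"
#include "absl/strings/str_format.h"

struct Point {
  int x, y;
  friend bool operator==(const Point& a, const Point& b) {
    return a.x == b.x && a.y == b.y;
  }
  // Opts Point into CHECK failure messages; no operator<< is needed.
  template <typename Sink>
  friend void AbslStringify(Sink& sink, const Point& p) {
    absl::Format(&sink, "Point(%d, %d)", p.x, p.y);
  }
};

void Demo() {
  Point a{1, 2}, b{1, 3};
  // On failure the message reads:
  //   Check failed: a == b (Point(1, 2) vs. Point(1, 3))
  CHECK_EQ(a, b);
}
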
diff --git a/absl/log/die_if_null.h b/absl/log/die_if_null.h
index 127a9ac..f773aa8 100644
--- a/absl/log/die_if_null.h
+++ b/absl/log/die_if_null.h
@@ -55,7 +55,7 @@
 // `line` location. Called when `ABSL_DIE_IF_NULL` fails. Calling this function
 // generates less code than its implementation would if inlined, for a slight
 // code size reduction each time `ABSL_DIE_IF_NULL` is called.
-ABSL_ATTRIBUTE_NORETURN ABSL_ATTRIBUTE_NOINLINE void DieBecauseNull(
+[[noreturn]] ABSL_ATTRIBUTE_NOINLINE void DieBecauseNull(
     const char* file, int line, const char* exprtext);
 
 // Helper for `ABSL_DIE_IF_NULL`.
diff --git a/absl/log/globals.h b/absl/log/globals.h
index b36e47e..4feec40 100644
--- a/absl/log/globals.h
+++ b/absl/log/globals.h
@@ -140,7 +140,7 @@
 //
 // This option tells the logging library that every logged message
 // should include the prefix (severity, date, time, PID, etc.)
-
+//
 // ShouldPrependLogPrefix()
 //
 // Returns the value of the Prepend Log Prefix option.
@@ -154,25 +154,38 @@
 void EnableLogPrefix(bool on_off);
 
 //------------------------------------------------------------------------------
-// Set Global VLOG Level
+// `VLOG` Configuration
 //------------------------------------------------------------------------------
 //
-// Sets the global `(ABSL_)VLOG(_IS_ON)` level to `log_level`.  This level is
-// applied to any sites whose filename doesn't match any `module_pattern`.
-// Returns the prior value.
-inline int SetGlobalVLogLevel(int log_level) {
-  return absl::log_internal::UpdateGlobalVLogLevel(log_level);
+// These methods set the `(ABSL_)VLOG(_IS_ON)` threshold.  They allow
+// programmatic control of the thresholds set by the --v and --vmodule flags.
+//
+// Only `VLOG`s with a verbosity level LESS THAN OR EQUAL TO the threshold will
+// be evaluated.
+//
+// For example, if the threshold is 2, then:
+//
+//   VLOG(2) << "This message will be logged.";
+//   VLOG(3) << "This message will NOT be logged.";
+//
+// The default threshold is 0. Since `VLOG` levels must not be negative, a
+// negative threshold value will turn off all VLOGs.
+
+// SetGlobalVLogLevel()
+//
+// Sets the global `VLOG` level to threshold. Returns the previous global
+// threshold.
+inline int SetGlobalVLogLevel(int threshold) {
+  return absl::log_internal::UpdateGlobalVLogLevel(threshold);
 }
 
-//------------------------------------------------------------------------------
-// Set VLOG Level
-//------------------------------------------------------------------------------
+// SetVLogLevel()
 //
-// Sets `(ABSL_)VLOG(_IS_ON)` level for `module_pattern` to `log_level`.  This
-// allows programmatic control of what is normally set by the --vmodule flag.
-// Returns the level that previously applied to `module_pattern`.
-inline int SetVLogLevel(absl::string_view module_pattern, int log_level) {
-  return absl::log_internal::PrependVModule(module_pattern, log_level);
+// Sets the `VLOG` threshold for all files that match `module_pattern`,
+// overwriting any prior value. Files that don't match aren't affected.
+// Returns the threshold that previously applied to `module_pattern`.
+inline int SetVLogLevel(absl::string_view module_pattern, int threshold) {
+  return absl::log_internal::PrependVModule(module_pattern, threshold);
 }
 
 //------------------------------------------------------------------------------
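
The reworked comments above describe the threshold semantics; as a concrete usage sketch (assuming the VLOG macro from absl/log/log.h, with "decoder_*" as a purely illustrative pattern):

#include "absl/log/globals.h"
#include "absl/log/log.h"

void ConfigureVerbosity() {
  absl::SetGlobalVLogLevel(2);         // files not matched by --vmodule use 2
  absl::SetVLogLevel("decoder_*", 3);  // files matching decoder_* use 3

  VLOG(2) << "evaluated here (2 <= 2)";
  VLOG(3) << "skipped here, but evaluated in files matching decoder_*";
}
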
diff --git a/absl/log/internal/BUILD.bazel b/absl/log/internal/BUILD.bazel
index 1be1349..2dbf337 100644
--- a/absl/log/internal/BUILD.bazel
+++ b/absl/log/internal/BUILD.bazel
@@ -266,6 +266,7 @@
     deps = [
         ":log_message",
         ":nullstream",
+        "//absl/base:core_headers",
         "//absl/base:log_severity",
     ],
 )
@@ -384,7 +385,9 @@
     hdrs = ["vlog_config.h"],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    visibility = ["//absl/log:__subpackages__"],
+    visibility = [
+        "//absl/log:__subpackages__",
+    ],
     deps = [
         "//absl/base",
         "//absl/base:config",
@@ -400,7 +403,7 @@
 
 cc_binary(
     name = "vlog_config_benchmark",
-    testonly = 1,
+    testonly = True,
     srcs = ["vlog_config_benchmark.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
diff --git a/absl/log/internal/check_op.cc b/absl/log/internal/check_op.cc
index f4b6764..23c4a3b 100644
--- a/absl/log/internal/check_op.cc
+++ b/absl/log/internal/check_op.cc
@@ -16,6 +16,10 @@
 
 #include <string.h>
 
+#include <ostream>
+
+#include "absl/strings/string_view.h"
+
 #ifdef _MSC_VER
 #define strcasecmp _stricmp
 #else
@@ -113,6 +117,22 @@
 DEFINE_CHECK_STROP_IMPL(CHECK_STRCASENE, strcasecmp, false)
 #undef DEFINE_CHECK_STROP_IMPL
 
+namespace detect_specialization {
+
+StringifySink::StringifySink(std::ostream& os) : os_(os) {}
+
+void StringifySink::Append(absl::string_view text) { os_ << text; }
+
+void StringifySink::Append(size_t length, char ch) {
+  for (size_t i = 0; i < length; ++i) os_.put(ch);
+}
+
+void AbslFormatFlush(StringifySink* sink, absl::string_view text) {
+  sink->Append(text);
+}
+
+}  // namespace detect_specialization
+
 }  // namespace log_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/absl/log/internal/check_op.h b/absl/log/internal/check_op.h
index 11f0f40..2159220 100644
--- a/absl/log/internal/check_op.h
+++ b/absl/log/internal/check_op.h
@@ -24,9 +24,11 @@
 
 #include <stdint.h>
 
+#include <cstddef>
 #include <ostream>
 #include <sstream>
 #include <string>
+#include <type_traits>
 #include <utility>
 
 #include "absl/base/attributes.h"
@@ -35,6 +37,8 @@
 #include "absl/log/internal/nullguard.h"
 #include "absl/log/internal/nullstream.h"
 #include "absl/log/internal/strip.h"
+#include "absl/strings/has_absl_stringify.h"
+#include "absl/strings/string_view.h"
 
 // `ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL` wraps string literals that
 // should be stripped when `ABSL_MIN_LOG_LEVEL` exceeds `kFatal`.
@@ -58,13 +62,13 @@
 #endif
 
 #define ABSL_LOG_INTERNAL_CHECK_OP(name, op, val1, val1_text, val2, val2_text) \
-  while (                                                                      \
-      ::std::string* absl_log_internal_check_op_result ABSL_ATTRIBUTE_UNUSED = \
-          ::absl::log_internal::name##Impl(                                    \
-              ::absl::log_internal::GetReferenceableValue(val1),               \
-              ::absl::log_internal::GetReferenceableValue(val2),               \
-              ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(val1_text                 \
-                                                     " " #op " " val2_text)))  \
+  while (::std::string* absl_log_internal_check_op_result                      \
+             ABSL_LOG_INTERNAL_ATTRIBUTE_UNUSED_IF_STRIP_LOG =                 \
+                 ::absl::log_internal::name##Impl(                             \
+                     ::absl::log_internal::GetReferenceableValue(val1),        \
+                     ::absl::log_internal::GetReferenceableValue(val2),        \
+                     ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(                   \
+                         val1_text " " #op " " val2_text)))                    \
     ABSL_LOG_INTERNAL_CONDITION_FATAL(STATELESS, true)                         \
   ABSL_LOG_INTERNAL_CHECK(*absl_log_internal_check_op_result).InternalStream()
 #define ABSL_LOG_INTERNAL_QCHECK_OP(name, op, val1, val1_text, val2, \
@@ -287,6 +291,44 @@
                                            std::declval<const T&>()))
 Detect(char);
 
+// A sink for AbslStringify which redirects everything to a std::ostream.
+class StringifySink {
+ public:
+  explicit StringifySink(std::ostream& os ABSL_ATTRIBUTE_LIFETIME_BOUND);
+
+  void Append(absl::string_view text);
+  void Append(size_t length, char ch);
+  friend void AbslFormatFlush(StringifySink* sink, absl::string_view text);
+
+ private:
+  std::ostream& os_;
+};
+
+// Wraps a type implementing AbslStringify, and implements operator<<.
+template <typename T>
+class StringifyToStreamWrapper {
+ public:
+  explicit StringifyToStreamWrapper(const T& v ABSL_ATTRIBUTE_LIFETIME_BOUND)
+      : v_(v) {}
+
+  friend std::ostream& operator<<(std::ostream& os,
+                                  const StringifyToStreamWrapper& wrapper) {
+    StringifySink sink(os);
+    AbslStringify(sink, wrapper.v_);
+    return os;
+  }
+
+ private:
+  const T& v_;
+};
+
+// This overload triggers when T implements AbslStringify.
+// StringifyToStreamWrapper is used to allow MakeCheckOpString to use
+// operator<<.
+template <typename T>
+std::enable_if_t<HasAbslStringify<T>::value,
+                 StringifyToStreamWrapper<T>>
+Detect(...);  // Ellipsis has lowest preference when int passed.
 }  // namespace detect_specialization
 
 template <typename T>
@@ -342,20 +384,20 @@
 // `(int, int)` override works around the issue that the compiler will not
 // instantiate the template version of the function on values of unnamed enum
 // type.
-#define ABSL_LOG_INTERNAL_CHECK_OP_IMPL(name, op)                        \
-  template <typename T1, typename T2>                                    \
-  inline constexpr ::std::string* name##Impl(const T1& v1, const T2& v2, \
-                                             const char* exprtext) {     \
-    using U1 = CheckOpStreamType<T1>;                                    \
-    using U2 = CheckOpStreamType<T2>;                                    \
-    return ABSL_PREDICT_TRUE(v1 op v2)                                   \
-               ? nullptr                                                 \
-               : ABSL_LOG_INTERNAL_CHECK_OP_IMPL_RESULT(U1, U2, v1, v2,  \
-                                                        exprtext);       \
-  }                                                                      \
-  inline constexpr ::std::string* name##Impl(int v1, int v2,             \
-                                             const char* exprtext) {     \
-    return name##Impl<int, int>(v1, v2, exprtext);                       \
+#define ABSL_LOG_INTERNAL_CHECK_OP_IMPL(name, op)                          \
+  template <typename T1, typename T2>                                      \
+  inline constexpr ::std::string* name##Impl(const T1& v1, const T2& v2,   \
+                                             const char* exprtext) {       \
+    using U1 = CheckOpStreamType<T1>;                                      \
+    using U2 = CheckOpStreamType<T2>;                                      \
+    return ABSL_PREDICT_TRUE(v1 op v2)                                     \
+               ? nullptr                                                   \
+               : ABSL_LOG_INTERNAL_CHECK_OP_IMPL_RESULT(U1, U2, U1(v1),    \
+                                                        U2(v2), exprtext); \
+  }                                                                        \
+  inline constexpr ::std::string* name##Impl(int v1, int v2,               \
+                                             const char* exprtext) {       \
+    return name##Impl<int, int>(v1, v2, exprtext);                         \
   }
 
 ABSL_LOG_INTERNAL_CHECK_OP_IMPL(Check_EQ, ==)
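
The machinery above keeps MakeCheckOpString streaming-based for every operand type: Detect() has an int-parameter overload for streamable types and the new ellipsis overload for types that only provide AbslStringify, and since ellipsis ranks last, operator<< still wins when both exist (matching the test added in check_test_impl.inc). A freestanding sketch of the sink-to-ostream bridge, simplified from the classes above (std::string_view stands in for absl::string_view):

#include <ostream>
#include <string_view>

// Sink that forwards AbslStringify output to a std::ostream.
class OstreamSink {
 public:
  explicit OstreamSink(std::ostream& os) : os_(os) {}
  void Append(std::string_view text) { os_ << text; }

 private:
  std::ostream& os_;
};

// Makes any type with an ADL-visible AbslStringify streamable.
template <typename T>
struct StreamViaStringify {
  const T& value;
  friend std::ostream& operator<<(std::ostream& os,
                                  const StreamViaStringify& w) {
    OstreamSink sink(os);
    AbslStringify(sink, w.value);  // found by argument-dependent lookup
    return os;
  }
};
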
diff --git a/absl/log/internal/conditions.h b/absl/log/internal/conditions.h
index 645f3c2..9dc15db 100644
--- a/absl/log/internal/conditions.h
+++ b/absl/log/internal/conditions.h
@@ -230,8 +230,8 @@
 
 // Helper routines to abort the application quietly
 
-ABSL_ATTRIBUTE_NORETURN inline void AbortQuietly() { abort(); }
-ABSL_ATTRIBUTE_NORETURN inline void ExitQuietly() { _exit(1); }
+[[noreturn]] inline void AbortQuietly() { abort(); }
+[[noreturn]] inline void ExitQuietly() { _exit(1); }
 }  // namespace log_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/absl/log/internal/log_impl.h b/absl/log/internal/log_impl.h
index 99de6db..a67f2f3 100644
--- a/absl/log/internal/log_impl.h
+++ b/absl/log/internal/log_impl.h
@@ -35,14 +35,14 @@
 #ifndef NDEBUG
 #define ABSL_LOG_INTERNAL_DLOG_IMPL(severity)            \
   ABSL_LOG_INTERNAL_CONDITION##severity(STATELESS, true) \
-      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+      ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
 #else
 #define ABSL_LOG_INTERNAL_DLOG_IMPL(severity)             \
   ABSL_LOG_INTERNAL_CONDITION##severity(STATELESS, false) \
-      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+      ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
 #endif
 
-// The `switch` ensures that this expansion is the begnning of a statement (as
+// The `switch` ensures that this expansion is the beginning of a statement (as
 // opposed to an expression). The use of both `case 0` and `default` is to
 // suppress a compiler warning.
 #define ABSL_LOG_INTERNAL_VLOG_IMPL(verbose_level)                         \
@@ -58,7 +58,7 @@
   switch (const int absl_logging_internal_verbose_level = (verbose_level)) \
   case 0:                                                                  \
   default:                                                                 \
-    ABSL_LOG_INTERNAL_LOG_IF_IMPL(                                         \
+    ABSL_LOG_INTERNAL_DLOG_IF_IMPL(                                         \
         _INFO, ABSL_VLOG_IS_ON(absl_logging_internal_verbose_level))       \
         .WithVerbosity(absl_logging_internal_verbose_level)
 #else
@@ -66,7 +66,7 @@
   switch (const int absl_logging_internal_verbose_level = (verbose_level))    \
   case 0:                                                                     \
   default:                                                                    \
-    ABSL_LOG_INTERNAL_LOG_IF_IMPL(                                            \
+    ABSL_LOG_INTERNAL_DLOG_IF_IMPL(                                            \
         _INFO, false && ABSL_VLOG_IS_ON(absl_logging_internal_verbose_level)) \
         .WithVerbosity(absl_logging_internal_verbose_level)
 #endif
@@ -82,11 +82,11 @@
 #ifndef NDEBUG
 #define ABSL_LOG_INTERNAL_DLOG_IF_IMPL(severity, condition)   \
   ABSL_LOG_INTERNAL_CONDITION##severity(STATELESS, condition) \
-      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+      ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
 #else
 #define ABSL_LOG_INTERNAL_DLOG_IF_IMPL(severity, condition)              \
   ABSL_LOG_INTERNAL_CONDITION##severity(STATELESS, false && (condition)) \
-      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+      ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
 #endif
 
 // ABSL_LOG_EVERY_N
@@ -132,36 +132,36 @@
 #ifndef NDEBUG
 #define ABSL_LOG_INTERNAL_DLOG_EVERY_N_IMPL(severity, n) \
   ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, true)       \
-  (EveryN, n) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+  (EveryN, n) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
 
 #define ABSL_LOG_INTERNAL_DLOG_FIRST_N_IMPL(severity, n) \
   ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, true)       \
-  (FirstN, n) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+  (FirstN, n) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
 
 #define ABSL_LOG_INTERNAL_DLOG_EVERY_POW_2_IMPL(severity) \
   ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, true)        \
-  (EveryPow2) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+  (EveryPow2) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
 
 #define ABSL_LOG_INTERNAL_DLOG_EVERY_N_SEC_IMPL(severity, n_seconds) \
   ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, true)                   \
-  (EveryNSec, n_seconds) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+  (EveryNSec, n_seconds) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
 
 #else  // def NDEBUG
 #define ABSL_LOG_INTERNAL_DLOG_EVERY_N_IMPL(severity, n) \
   ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, false)      \
-  (EveryN, n) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+  (EveryN, n) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
 
 #define ABSL_LOG_INTERNAL_DLOG_FIRST_N_IMPL(severity, n) \
   ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, false)      \
-  (FirstN, n) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+  (FirstN, n) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
 
 #define ABSL_LOG_INTERNAL_DLOG_EVERY_POW_2_IMPL(severity) \
   ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, false)       \
-  (EveryPow2) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+  (EveryPow2) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
 
 #define ABSL_LOG_INTERNAL_DLOG_EVERY_N_SEC_IMPL(severity, n_seconds) \
   ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, false)                  \
-  (EveryNSec, n_seconds) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+  (EveryNSec, n_seconds) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
 #endif  // def NDEBUG
 
 #define ABSL_LOG_INTERNAL_VLOG_EVERY_N_IMPL(verbose_level, n)                \
@@ -243,40 +243,40 @@
 #ifndef NDEBUG
 #define ABSL_LOG_INTERNAL_DLOG_IF_EVERY_N_IMPL(severity, condition, n)  \
   ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(EveryN, n) \
-      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+      ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
 
 #define ABSL_LOG_INTERNAL_DLOG_IF_FIRST_N_IMPL(severity, condition, n)  \
   ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(FirstN, n) \
-      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+      ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
 
 #define ABSL_LOG_INTERNAL_DLOG_IF_EVERY_POW_2_IMPL(severity, condition) \
   ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(EveryPow2) \
-      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+      ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
 
 #define ABSL_LOG_INTERNAL_DLOG_IF_EVERY_N_SEC_IMPL(severity, condition, \
                                                    n_seconds)           \
   ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(EveryNSec, \
                                                              n_seconds) \
-      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+      ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
 
 #else  // def NDEBUG
 #define ABSL_LOG_INTERNAL_DLOG_IF_EVERY_N_IMPL(severity, condition, n)   \
   ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, false && (condition))( \
-      EveryN, n) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+      EveryN, n) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
 
 #define ABSL_LOG_INTERNAL_DLOG_IF_FIRST_N_IMPL(severity, condition, n)   \
   ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, false && (condition))( \
-      FirstN, n) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+      FirstN, n) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
 
 #define ABSL_LOG_INTERNAL_DLOG_IF_EVERY_POW_2_IMPL(severity, condition)  \
   ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, false && (condition))( \
-      EveryPow2) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+      EveryPow2) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
 
 #define ABSL_LOG_INTERNAL_DLOG_IF_EVERY_N_SEC_IMPL(severity, condition,  \
                                                    n_seconds)            \
   ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, false && (condition))( \
       EveryNSec, n_seconds)                                              \
-      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
+      ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
 #endif  // def NDEBUG
 
 #endif  // ABSL_LOG_INTERNAL_LOG_IMPL_H_
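
The rename above points the debug-only expansions at ABSL_LOGGING_INTERNAL_DLOG##severity (and DVLOG at ABSL_LOG_INTERNAL_DLOG_IF_IMPL); as the NDEBUG branches show, the condition is prefixed with `false &&`, so streamed operands are never evaluated in release builds. Behavior sketch, assuming the DVLOG macro from absl/log/log.h:

#include "absl/log/log.h"

void TraceStep(int step) {
  // Debug builds: logged when the effective verbosity threshold is >= 2.
  // NDEBUG builds: the condition is short-circuited with `false &&`, so the
  // streamed expression is never evaluated.
  DVLOG(2) << "step " << step;
}
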
diff --git a/absl/log/internal/log_message.cc b/absl/log/internal/log_message.cc
index 10ac245..4e9b08a 100644
--- a/absl/log/internal/log_message.cc
+++ b/absl/log/internal/log_message.cc
@@ -27,6 +27,7 @@
 #include <algorithm>
 #include <array>
 #include <atomic>
+#include <ios>
 #include <memory>
 #include <ostream>
 #include <string>
@@ -67,7 +68,14 @@
 namespace {
 // message `logging.proto.Event`
 enum EventTag : uint8_t {
+  kFileName = 2,
+  kFileLine = 3,
+  kTimeNsecs = 4,
+  kSeverity = 5,
+  kThreadId = 6,
   kValue = 7,
+  kSequenceNumber = 9,
+  kThreadName = 10,
 };
 
 // message `logging.proto.Value`
@@ -100,6 +108,23 @@
   return true;
 }
 
+// See `logging.proto.Severity`
+int32_t ProtoSeverity(absl::LogSeverity severity, int verbose_level) {
+  switch (severity) {
+    case absl::LogSeverity::kInfo:
+      if (verbose_level == absl::LogEntry::kNoVerbosityLevel) return 800;
+      return 600 - verbose_level;
+    case absl::LogSeverity::kWarning:
+      return 900;
+    case absl::LogSeverity::kError:
+      return 950;
+    case absl::LogSeverity::kFatal:
+      return 1100;
+    default:
+      return 800;
+  }
+}
+
 absl::string_view Basename(absl::string_view filepath) {
 #ifdef _WIN32
   size_t path = filepath.find_last_of("/\\");
@@ -145,26 +170,37 @@
 
   // A `logging.proto.Event` proto message is built into `encoded_buf`.
   std::array<char, kLogMessageBufferSize> encoded_buf;
-  // `encoded_remaining` is the suffix of `encoded_buf` that has not been filled
-  // yet.  If a datum to be encoded does not fit into `encoded_remaining` and
-  // cannot be truncated to fit, the size of `encoded_remaining` will be zeroed
-  // to prevent encoding of any further data.  Note that in this case its data()
-  // pointer will not point past the end of `encoded_buf`.
-  absl::Span<char> encoded_remaining;
+  // `encoded_remaining()` is the suffix of `encoded_buf` that has not been
+  // filled yet.  If a datum to be encoded does not fit into
+  // `encoded_remaining()` and cannot be truncated to fit, the size of
+  // `encoded_remaining()` will be zeroed to prevent encoding of any further
+  // data.  Note that in this case its `data()` pointer will not point past the
+  // end of `encoded_buf`.
+  // The first use of `encoded_remaining()` is our chance to record metadata
+  // after any modifications (e.g. by `AtLocation()`) but before any data have
+  // been recorded.  We want to record metadata before data so that data are
+  // preferentially truncated if we run out of buffer.
+  absl::Span<char>& encoded_remaining() {
+    if (encoded_remaining_actual_do_not_use_directly.data() == nullptr) {
+      encoded_remaining_actual_do_not_use_directly =
+          absl::MakeSpan(encoded_buf);
+      InitializeEncodingAndFormat();
+    }
+    return encoded_remaining_actual_do_not_use_directly;
+  }
+  absl::Span<char> encoded_remaining_actual_do_not_use_directly;
 
   // A formatted string message is built in `string_buf`.
   std::array<char, kLogMessageBufferSize> string_buf;
 
+  void InitializeEncodingAndFormat();
   void FinalizeEncodingAndFormat();
 };
 
 LogMessage::LogMessageData::LogMessageData(const char* file, int line,
                                            absl::LogSeverity severity,
                                            absl::Time timestamp)
-    : extra_sinks_only(false),
-      manipulated(nullptr),
-      // This `absl::MakeSpan` silences spurious -Wuninitialized from GCC:
-      encoded_remaining(absl::MakeSpan(encoded_buf)) {
+    : extra_sinks_only(false), manipulated(nullptr) {
   // Legacy defaults for LOG's ostream:
   manipulated.setf(std::ios_base::showbase | std::ios_base::boolalpha);
   entry.full_filename_ = file;
@@ -177,13 +213,25 @@
   entry.tid_ = absl::base_internal::GetCachedTID();
 }
 
+void LogMessage::LogMessageData::InitializeEncodingAndFormat() {
+  EncodeStringTruncate(EventTag::kFileName, entry.source_filename(),
+                       &encoded_remaining());
+  EncodeVarint(EventTag::kFileLine, entry.source_line(), &encoded_remaining());
+  EncodeVarint(EventTag::kTimeNsecs, absl::ToUnixNanos(entry.timestamp()),
+               &encoded_remaining());
+  EncodeVarint(EventTag::kSeverity,
+               ProtoSeverity(entry.log_severity(), entry.verbosity()),
+               &encoded_remaining());
+  EncodeVarint(EventTag::kThreadId, entry.tid(), &encoded_remaining());
+}
+
 void LogMessage::LogMessageData::FinalizeEncodingAndFormat() {
-  // Note that `encoded_remaining` may have zero size without pointing past the
-  // end of `encoded_buf`, so the difference between `data()` pointers is used
-  // to compute the size of `encoded_data`.
+  // Note that `encoded_remaining()` may have zero size without pointing past
+  // the end of `encoded_buf`, so the difference between `data()` pointers is
+  // used to compute the size of `encoded_data`.
   absl::Span<const char> encoded_data(
       encoded_buf.data(),
-      static_cast<size_t>(encoded_remaining.data() - encoded_buf.data()));
+      static_cast<size_t>(encoded_remaining().data() - encoded_buf.data()));
   // `string_remaining` is the suffix of `string_buf` that has not been filled
   // yet.
   absl::Span<char> string_remaining(string_buf);
@@ -211,7 +259,6 @@
         if (PrintValue(string_remaining, field.bytes_value())) continue;
         break;
     }
-    break;
   }
   auto chars_written =
       static_cast<size_t>(string_remaining.data() - string_buf.data());
@@ -413,7 +460,7 @@
   data_->FinalizeEncodingAndFormat();
   data_->entry.encoding_ =
       absl::string_view(data_->encoded_buf.data(),
-                        static_cast<size_t>(data_->encoded_remaining.data() -
+                        static_cast<size_t>(data_->encoded_remaining().data() -
                                             data_->encoded_buf.data()));
   SendToLog();
 }
@@ -421,7 +468,7 @@
 void LogMessage::SetFailQuietly() { data_->fail_quietly = true; }
 
 LogMessage::OstreamView::OstreamView(LogMessageData& message_data)
-    : data_(message_data), encoded_remaining_copy_(data_.encoded_remaining) {
+    : data_(message_data), encoded_remaining_copy_(data_.encoded_remaining()) {
   // This constructor sets the `streambuf` up so that streaming into an attached
   // ostream encodes string data in-place.  To do that, we write appropriate
   // headers into the buffer using a copy of the buffer view so that we can
@@ -444,8 +491,8 @@
   if (!string_start_.data()) {
     // The second field header didn't fit.  Whether the first one did or not, we
     // shouldn't commit `encoded_remaining_copy_`, and we also need to zero the
-    // size of `data_->encoded_remaining` so that no more data are encoded.
-    data_.encoded_remaining.remove_suffix(data_.encoded_remaining.size());
+    // size of `data_.encoded_remaining()` so that no more data are encoded.
+    data_.encoded_remaining().remove_suffix(data_.encoded_remaining().size());
     return;
   }
   const absl::Span<const char> contents(pbase(),
@@ -454,7 +501,7 @@
   encoded_remaining_copy_.remove_prefix(contents.size());
   EncodeMessageLength(string_start_, &encoded_remaining_copy_);
   EncodeMessageLength(message_start_, &encoded_remaining_copy_);
-  data_.encoded_remaining = encoded_remaining_copy_;
+  data_.encoded_remaining() = encoded_remaining_copy_;
 }
 
 std::ostream& LogMessage::OstreamView::stream() { return data_.manipulated; }
@@ -521,13 +568,13 @@
   view.stream() << ") ";
 }
 
-// Encodes into `data_->encoded_remaining` a partial `logging.proto.Event`
+// Encodes into `data_->encoded_remaining()` a partial `logging.proto.Event`
 // containing the specified string data using a `Value` field appropriate to
 // `str_type`.  Truncates `str` if necessary, but emits nothing and marks the
 // buffer full if even the field headers do not fit.
 template <LogMessage::StringType str_type>
 void LogMessage::CopyToEncodedBuffer(absl::string_view str) {
-  auto encoded_remaining_copy = data_->encoded_remaining;
+  auto encoded_remaining_copy = data_->encoded_remaining();
   auto start = EncodeMessageStart(
       EventTag::kValue, BufferSizeFor(WireType::kLengthDelimited) + str.size(),
       &encoded_remaining_copy);
@@ -540,11 +587,11 @@
                            str, &encoded_remaining_copy)) {
     // The string may have been truncated, but the field header fit.
     EncodeMessageLength(start, &encoded_remaining_copy);
-    data_->encoded_remaining = encoded_remaining_copy;
+    data_->encoded_remaining() = encoded_remaining_copy;
   } else {
-    // The field header(s) did not fit; zero `encoded_remaining` so we don't
+    // The field header(s) did not fit; zero `encoded_remaining()` so we don't
     // write anything else later.
-    data_->encoded_remaining.remove_suffix(data_->encoded_remaining.size());
+    data_->encoded_remaining().remove_suffix(data_->encoded_remaining().size());
   }
 }
 template void LogMessage::CopyToEncodedBuffer<LogMessage::StringType::kLiteral>(
@@ -553,7 +600,7 @@
     LogMessage::StringType::kNotLiteral>(absl::string_view str);
 template <LogMessage::StringType str_type>
 void LogMessage::CopyToEncodedBuffer(char ch, size_t num) {
-  auto encoded_remaining_copy = data_->encoded_remaining;
+  auto encoded_remaining_copy = data_->encoded_remaining();
   auto value_start = EncodeMessageStart(
       EventTag::kValue, BufferSizeFor(WireType::kLengthDelimited) + num,
       &encoded_remaining_copy);
@@ -566,11 +613,11 @@
     log_internal::AppendTruncated(ch, num, encoded_remaining_copy);
     EncodeMessageLength(str_start, &encoded_remaining_copy);
     EncodeMessageLength(value_start, &encoded_remaining_copy);
-    data_->encoded_remaining = encoded_remaining_copy;
+    data_->encoded_remaining() = encoded_remaining_copy;
   } else {
-    // The field header(s) did not fit; zero `encoded_remaining` so we don't
+    // The field header(s) did not fit; zero `encoded_remaining()` so we don't
     // write anything else later.
-    data_->encoded_remaining.remove_suffix(data_->encoded_remaining.size());
+    data_->encoded_remaining().remove_suffix(data_->encoded_remaining().size());
   }
 }
 template void LogMessage::CopyToEncodedBuffer<LogMessage::StringType::kLiteral>(
@@ -578,6 +625,13 @@
 template void LogMessage::CopyToEncodedBuffer<
     LogMessage::StringType::kNotLiteral>(char ch, size_t num);
 
+// These destructors are intentionally non-returning. Disable MSVC's warning
+// about a destructor that never returns (C4722), since that is intended here.
+#if defined(_MSC_VER) && !defined(__clang__)
+#pragma warning(push)
+#pragma warning(disable : 4722)
+#endif
+
 LogMessageFatal::LogMessageFatal(const char* file, int line)
     : LogMessage(file, line, absl::LogSeverity::kFatal) {}
 
@@ -587,19 +641,29 @@
   *this << "Check failed: " << failure_msg << " ";
 }
 
-// ABSL_ATTRIBUTE_NORETURN doesn't seem to work on destructors with msvc, so
-// disable msvc's warning about the d'tor never returning.
-#if defined(_MSC_VER) && !defined(__clang__)
-#pragma warning(push)
-#pragma warning(disable : 4722)
-#endif
 LogMessageFatal::~LogMessageFatal() {
   Flush();
   FailWithoutStackTrace();
 }
-#if defined(_MSC_VER) && !defined(__clang__)
-#pragma warning(pop)
-#endif
+
+LogMessageDebugFatal::LogMessageDebugFatal(const char* file, int line)
+    : LogMessage(file, line, absl::LogSeverity::kFatal) {}
+
+LogMessageDebugFatal::~LogMessageDebugFatal() {
+  Flush();
+  FailWithoutStackTrace();
+}
+
+LogMessageQuietlyDebugFatal::LogMessageQuietlyDebugFatal(const char* file,
+                                                         int line)
+    : LogMessage(file, line, absl::LogSeverity::kFatal) {
+  SetFailQuietly();
+}
+
+LogMessageQuietlyDebugFatal::~LogMessageQuietlyDebugFatal() {
+  Flush();
+  FailQuietly();
+}
 
 LogMessageQuietlyFatal::LogMessageQuietlyFatal(const char* file, int line)
     : LogMessage(file, line, absl::LogSeverity::kFatal) {
@@ -608,17 +672,10 @@
 
 LogMessageQuietlyFatal::LogMessageQuietlyFatal(const char* file, int line,
                                                absl::string_view failure_msg)
-    : LogMessage(file, line, absl::LogSeverity::kFatal) {
-  SetFailQuietly();
-  *this << "Check failed: " << failure_msg << " ";
+    : LogMessageQuietlyFatal(file, line) {
+  *this << "Check failed: " << failure_msg << " ";
 }
 
-// ABSL_ATTRIBUTE_NORETURN doesn't seem to work on destructors with msvc, so
-// disable msvc's warning about the d'tor never returning.
-#if defined(_MSC_VER) && !defined(__clang__)
-#pragma warning(push)
-#pragma warning(disable : 4722)
-#endif
 LogMessageQuietlyFatal::~LogMessageQuietlyFatal() {
   Flush();
   FailQuietly();
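
The `encoded_remaining()` change above turns the buffer view into a lazily-initialized accessor: the first call records metadata (file, line, timestamp, severity, thread id) before any payload, so payload rather than metadata is what gets truncated when the buffer fills. A minimal standalone sketch of that lazy-initialization pattern, with hypothetical names and a plain pointer/size pair standing in for `absl::Span<char>`:

#include <array>
#include <cstddef>
#include <cstring>
#include <iostream>

// Hypothetical, simplified sketch: the not-yet-filled suffix of the buffer
// ("remaining") is materialized lazily, and fixed metadata is written on the
// first use so that payload, not metadata, is truncated when space runs out.
struct Span {
  char* data = nullptr;
  std::size_t size = 0;
};

class EventBuffer {
 public:
  // Appends payload, truncating it if it does not fit.
  void Append(const char* s, std::size_t len) {
    Span& rem = Remaining();
    const std::size_t n = len < rem.size ? len : rem.size;
    std::memcpy(rem.data, s, n);
    rem.data += n;
    rem.size -= n;
  }

  std::size_t BytesUsed() const {
    return remaining_.data == nullptr
               ? 0
               : static_cast<std::size_t>(remaining_.data - buf_.data());
  }
  const char* Data() const { return buf_.data(); }

 private:
  // The first use is the chance to record metadata (here just a fixed tag)
  // before any payload has been written.
  Span& Remaining() {
    if (remaining_.data == nullptr) {
      remaining_ = {buf_.data(), buf_.size()};
      static constexpr char kMeta[] = "[meta]";
      std::memcpy(remaining_.data, kMeta, sizeof(kMeta) - 1);
      remaining_.data += sizeof(kMeta) - 1;
      remaining_.size -= sizeof(kMeta) - 1;
    }
    return remaining_;
  }

  std::array<char, 32> buf_{};
  Span remaining_;
};

int main() {
  EventBuffer b;
  b.Append("hello world", 11);
  // Prints "[meta]hello world".
  std::cout.write(b.Data(), static_cast<std::streamsize>(b.BytesUsed()))
      << '\n';
}

Routing every access through the accessor rather than the raw member is what guarantees the metadata header is written exactly once, and first.
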
diff --git a/absl/log/internal/log_message.h b/absl/log/internal/log_message.h
index 4ecb8a1..0c067da 100644
--- a/absl/log/internal/log_message.h
+++ b/absl/log/internal/log_message.h
@@ -187,11 +187,11 @@
  protected:
   // Call `abort()` or similar to perform `LOG(FATAL)` crash.  It is assumed
   // that the caller has already generated and written the trace as appropriate.
-  ABSL_ATTRIBUTE_NORETURN static void FailWithoutStackTrace();
+  [[noreturn]] static void FailWithoutStackTrace();
 
   // Similar to `FailWithoutStackTrace()`, but without `abort()`.  Terminates
   // the process with an error exit code.
-  ABSL_ATTRIBUTE_NORETURN static void FailQuietly();
+  [[noreturn]] static void FailQuietly();
 
   // Dispatches the completed `absl::LogEntry` to applicable `absl::LogSink`s.
   // This might as well be inlined into `~LogMessage` except that
@@ -354,15 +354,34 @@
   LogMessageFatal(const char* file, int line) ABSL_ATTRIBUTE_COLD;
   LogMessageFatal(const char* file, int line,
                   absl::string_view failure_msg) ABSL_ATTRIBUTE_COLD;
-  ABSL_ATTRIBUTE_NORETURN ~LogMessageFatal();
+  [[noreturn]] ~LogMessageFatal();
 };
 
+// `LogMessageDebugFatal` ensures the process exits with a failure status
+// after logging this message. It matches `LogMessageFatal` but is not
+// `[[noreturn]]`, since it is used for `DLOG(FATAL)` variants.
+class LogMessageDebugFatal final : public LogMessage {
+ public:
+  LogMessageDebugFatal(const char* file, int line) ABSL_ATTRIBUTE_COLD;
+  ~LogMessageDebugFatal();
+};
+
+class LogMessageQuietlyDebugFatal final : public LogMessage {
+ public:
+  // DLOG(QFATAL) calls this instead of LogMessageQuietlyFatal so that the
+  // destructor is not [[noreturn]], even though the severity is always FATAL,
+  // because this class is only used when DLOG() is enabled.
+  LogMessageQuietlyDebugFatal(const char* file, int line) ABSL_ATTRIBUTE_COLD;
+  ~LogMessageQuietlyDebugFatal();
+};
+
+// Used for LOG(QFATAL) to make sure it's properly understood as [[noreturn]].
 class LogMessageQuietlyFatal final : public LogMessage {
  public:
   LogMessageQuietlyFatal(const char* file, int line) ABSL_ATTRIBUTE_COLD;
   LogMessageQuietlyFatal(const char* file, int line,
                          absl::string_view failure_msg) ABSL_ATTRIBUTE_COLD;
-  ABSL_ATTRIBUTE_NORETURN ~LogMessageQuietlyFatal();
+  [[noreturn]] ~LogMessageQuietlyFatal();
 };
 
 }  // namespace log_internal
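
The new `LogMessageDebugFatal` and `LogMessageQuietlyDebugFatal` types exist so that fatal `DLOG` variants, which can compile to no-ops in `NDEBUG` builds, do not advertise `[[noreturn]]` destructors; otherwise the compiler could treat code after a `DLOG(FATAL)` as unreachable even in builds where the statement does nothing. An illustrative sketch of the distinction, with hypothetical types rather than the Abseil classes:

#include <cstdlib>
#include <iostream>

// A fatal message type with a [[noreturn]] destructor lets the compiler treat
// everything after the statement as unreachable, which is what LOG(FATAL)
// wants.  A DLOG(FATAL) that may be compiled out in release builds must not
// make that promise, so its debug-fatal type omits the attribute even though
// it also aborts.
struct Fatal {
  [[noreturn]] ~Fatal() { std::abort(); }
};

struct DebugFatal {
  ~DebugFatal() { std::abort(); }  // still aborts, but makes no promise
};

int WithFatal(bool fail) {
  if (fail) Fatal{};  // control never continues past this statement
  return 0;
}

int WithDebugFatal(bool fail) {
  if (fail) DebugFatal{};  // in a stripped build this could be a no-op, so
                           // the code below must stay reachable
  return 0;
}

int main() {
  std::cout << WithFatal(false) + WithDebugFatal(false) << "\n";  // prints 0
}

MSVC still reports C4722 (destructor never returns) for such types, which is the warning the pragma in log_message.cc suppresses.
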
diff --git a/absl/log/internal/nullstream.h b/absl/log/internal/nullstream.h
index 9266852..973e91a 100644
--- a/absl/log/internal/nullstream.h
+++ b/absl/log/internal/nullstream.h
@@ -117,16 +117,7 @@
 class NullStreamFatal final : public NullStream {
  public:
   NullStreamFatal() = default;
-  // ABSL_ATTRIBUTE_NORETURN doesn't seem to work on destructors with msvc, so
-  // disable msvc's warning about the d'tor never returning.
-#if defined(_MSC_VER) && !defined(__clang__)
-#pragma warning(push)
-#pragma warning(disable : 4722)
-#endif
-  ABSL_ATTRIBUTE_NORETURN ~NullStreamFatal() { _exit(1); }
-#if defined(_MSC_VER) && !defined(__clang__)
-#pragma warning(pop)
-#endif
+  [[noreturn]] ~NullStreamFatal() { _exit(1); }
 };
 
 }  // namespace log_internal
diff --git a/absl/log/internal/strip.h b/absl/log/internal/strip.h
index f8d2786..3e55010 100644
--- a/absl/log/internal/strip.h
+++ b/absl/log/internal/strip.h
@@ -20,6 +20,7 @@
 #ifndef ABSL_LOG_INTERNAL_STRIP_H_
 #define ABSL_LOG_INTERNAL_STRIP_H_
 
+#include "absl/base/attributes.h"  // IWYU pragma: keep
 #include "absl/base/log_severity.h"
 #include "absl/log/internal/log_message.h"
 #include "absl/log/internal/nullstream.h"
@@ -29,6 +30,16 @@
 // of defines comes in three flavors: vanilla, plus two variants that strip some
 // logging in subtly different ways for subtly different reasons (see below).
 #if defined(STRIP_LOG) && STRIP_LOG
+
+// Attribute for marking variables used in implementation details of logging
+// macros as unused, but only when `STRIP_LOG` is defined.
+// With `STRIP_LOG` on, not marking them triggers `-Wunused-but-set-variable`;
+// with `STRIP_LOG` off, marking them triggers `-Wused-but-marked-unused`.
+//
+// TODO(b/290784225): Replace this macro with attribute [[maybe_unused]] when
+// Abseil stops supporting C++14.
+#define ABSL_LOG_INTERNAL_ATTRIBUTE_UNUSED_IF_STRIP_LOG ABSL_ATTRIBUTE_UNUSED
+
 #define ABSL_LOGGING_INTERNAL_LOG_INFO ::absl::log_internal::NullStream()
 #define ABSL_LOGGING_INTERNAL_LOG_WARNING ::absl::log_internal::NullStream()
 #define ABSL_LOGGING_INTERNAL_LOG_ERROR ::absl::log_internal::NullStream()
@@ -38,10 +49,21 @@
   ::absl::log_internal::NullStreamMaybeFatal(::absl::kLogDebugFatal)
 #define ABSL_LOGGING_INTERNAL_LOG_LEVEL(severity) \
   ::absl::log_internal::NullStreamMaybeFatal(absl_log_internal_severity)
+
+// Fatal `DLOG`s expand a little differently to avoid being `[[noreturn]]`.
+#define ABSL_LOGGING_INTERNAL_DLOG_FATAL \
+  ::absl::log_internal::NullStreamMaybeFatal(::absl::LogSeverity::kFatal)
+#define ABSL_LOGGING_INTERNAL_DLOG_QFATAL \
+  ::absl::log_internal::NullStreamMaybeFatal(::absl::LogSeverity::kFatal)
+
 #define ABSL_LOG_INTERNAL_CHECK(failure_message) ABSL_LOGGING_INTERNAL_LOG_FATAL
 #define ABSL_LOG_INTERNAL_QCHECK(failure_message) \
   ABSL_LOGGING_INTERNAL_LOG_QFATAL
+
 #else  // !defined(STRIP_LOG) || !STRIP_LOG
+
+#define ABSL_LOG_INTERNAL_ATTRIBUTE_UNUSED_IF_STRIP_LOG
+
 #define ABSL_LOGGING_INTERNAL_LOG_INFO \
   ::absl::log_internal::LogMessage(    \
       __FILE__, __LINE__, ::absl::log_internal::LogMessage::InfoTag{})
@@ -60,6 +82,13 @@
 #define ABSL_LOGGING_INTERNAL_LOG_LEVEL(severity)      \
   ::absl::log_internal::LogMessage(__FILE__, __LINE__, \
                                    absl_log_internal_severity)
+
+// Fatal `DLOG`s expand a little differently to avoid being `[[noreturn]]`.
+#define ABSL_LOGGING_INTERNAL_DLOG_FATAL \
+  ::absl::log_internal::LogMessageDebugFatal(__FILE__, __LINE__)
+#define ABSL_LOGGING_INTERNAL_DLOG_QFATAL \
+  ::absl::log_internal::LogMessageQuietlyDebugFatal(__FILE__, __LINE__)
+
 // These special cases dispatch to special-case constructors that allow us to
 // avoid an extra function call and shrink non-LTO binaries by a percent or so.
 #define ABSL_LOG_INTERNAL_CHECK(failure_message) \
@@ -69,4 +98,11 @@
                                                failure_message)
 #endif  // !defined(STRIP_LOG) || !STRIP_LOG
 
+// This part of a non-fatal `DLOG` expands the same as `LOG`.
+#define ABSL_LOGGING_INTERNAL_DLOG_INFO ABSL_LOGGING_INTERNAL_LOG_INFO
+#define ABSL_LOGGING_INTERNAL_DLOG_WARNING ABSL_LOGGING_INTERNAL_LOG_WARNING
+#define ABSL_LOGGING_INTERNAL_DLOG_ERROR ABSL_LOGGING_INTERNAL_LOG_ERROR
+#define ABSL_LOGGING_INTERNAL_DLOG_DFATAL ABSL_LOGGING_INTERNAL_LOG_DFATAL
+#define ABSL_LOGGING_INTERNAL_DLOG_LEVEL ABSL_LOGGING_INTERNAL_LOG_LEVEL
+
 #endif  // ABSL_LOG_INTERNAL_STRIP_H_
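
The conditional `ABSL_LOG_INTERNAL_ATTRIBUTE_UNUSED_IF_STRIP_LOG` exists because a macro-local variable is read only by the non-stripped expansion, so the "unused" attribute has to be applied in exactly one configuration to keep both builds warning-clean. A rough sketch of the same pattern with hypothetical macro names (using `[[maybe_unused]]`, which the TODO above anticipates):

#include <iostream>

// The local set up by the logging macro is read only in the non-stripped
// expansion, so the attribute is applied only in the stripped configuration.
#ifdef MY_STRIP_LOG
#define MY_MAYBE_UNUSED [[maybe_unused]]
#define MY_LOG_BODY(sev, msg) static_cast<void>(0)
#else
#define MY_MAYBE_UNUSED
#define MY_LOG_BODY(sev, msg) (std::cerr << (sev) << ": " << (msg) << "\n")
#endif

#define MY_LOG(sev, msg)                                 \
  do {                                                   \
    MY_MAYBE_UNUSED const char* my_log_severity = (sev); \
    MY_LOG_BODY(my_log_severity, msg);                   \
  } while (0)

int main() {
  MY_LOG("INFO", "hello");  // logs "INFO: hello" unless MY_STRIP_LOG is defined
}
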
diff --git a/absl/log/internal/test_matchers.cc b/absl/log/internal/test_matchers.cc
index 8c6515c..042083d 100644
--- a/absl/log/internal/test_matchers.cc
+++ b/absl/log/internal/test_matchers.cc
@@ -26,6 +26,7 @@
 #include "absl/base/attributes.h"
 #include "absl/base/config.h"
 #include "absl/log/internal/test_helpers.h"
+#include "absl/log/log_entry.h"
 #include "absl/strings/string_view.h"
 #include "absl/time/clock.h"
 #include "absl/time/time.h"
@@ -131,11 +132,9 @@
   return Property("timestamp", &absl::LogEntry::timestamp, timestamp);
 }
 
-Matcher<const absl::LogEntry&> TimestampInMatchWindow() {
-  return Property("timestamp", &absl::LogEntry::timestamp,
-                  AllOf(Ge(absl::Now()), Truly([](absl::Time arg) {
-                          return arg <= absl::Now();
-                        })));
+Matcher<absl::Time> InMatchWindow() {
+  return AllOf(Ge(absl::Now()),
+               Truly([](absl::Time arg) { return arg <= absl::Now(); }));
 }
 
 Matcher<const absl::LogEntry&> ThreadID(
diff --git a/absl/log/internal/test_matchers.h b/absl/log/internal/test_matchers.h
index fc653a9..906eda2 100644
--- a/absl/log/internal/test_matchers.h
+++ b/absl/log/internal/test_matchers.h
@@ -62,7 +62,7 @@
     const ::testing::Matcher<absl::Time>& timestamp);
 // Matches an `absl::Time` that falls after the instantiation of this
 // matcher and before its execution, as is normal when used with EXPECT_CALL.
-::testing::Matcher<const absl::LogEntry&> TimestampInMatchWindow();
+::testing::Matcher<absl::Time> InMatchWindow();
 ::testing::Matcher<const absl::LogEntry&> ThreadID(
     const ::testing::Matcher<absl::LogEntry::tid_t>&);
 ::testing::Matcher<const absl::LogEntry&> TextMessageWithPrefixAndNewline(
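
Replacing `TimestampInMatchWindow()` with the plain value matcher `InMatchWindow()` lets the tests compose one window check with `Timestamp(...)` on the `LogEntry` and reuse it for the timestamp argument of `MatchesEvent(...)`. A hedged googletest sketch of the same composition idea, using `std::chrono` and hypothetical names instead of `absl::Time` and the Abseil matchers (assumes linking against gmock and gtest_main):

#include <chrono>

#include "gmock/gmock.h"
#include "gtest/gtest.h"

namespace {

using Clock = std::chrono::system_clock;

struct Entry {
  Clock::time_point timestamp;
};

// Matches a time point between this matcher's construction and its execution.
// Ge() captures the lower bound now; Truly() defers the upper bound until the
// match is evaluated.
testing::Matcher<Clock::time_point> InWindow() {
  return testing::AllOf(
      testing::Ge(Clock::now()),
      testing::Truly([](Clock::time_point t) { return t <= Clock::now(); }));
}

TEST(InWindowSketch, ComposesWithFieldMatchers) {
  // As with EXPECT_CALL, the matcher is built first and the value afterwards.
  const testing::Matcher<const Entry&> m =
      testing::Field(&Entry::timestamp, InWindow());
  const Entry e{Clock::now()};
  EXPECT_THAT(e, m);
}

}  // namespace
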
diff --git a/absl/log/log.h b/absl/log/log.h
index b721b08..a4e1d1f 100644
--- a/absl/log/log.h
+++ b/absl/log/log.h
@@ -198,7 +198,6 @@
 #define ABSL_LOG_LOG_H_
 
 #include "absl/log/internal/log_impl.h"
-#include "absl/log/vlog_is_on.h"  // IWYU pragma: export
 
 // LOG()
 //
@@ -234,6 +233,11 @@
 //
 // See vlog_is_on.h for further documentation, including the usage of the
 // --vmodule flag to log at different levels in different source files.
+//
+// `VLOG` does not produce any output when verbose logging is not enabled.
+// However, simply testing whether verbose logging is enabled can be expensive.
+// If you don't intend to enable verbose logging in non-debug builds, consider
+// using `DVLOG` instead.
 #define VLOG(severity) ABSL_LOG_INTERNAL_VLOG_IMPL(severity)
 
 // `DVLOG` behaves like `VLOG` in debug mode (i.e. `#ifndef NDEBUG`).
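
As the new comment advises, `VLOG` still performs a runtime verbosity check when verbose logging is disabled, while `DVLOG` is intended to compile away entirely in `NDEBUG` builds. A short usage sketch, assuming the Abseil logging library is linked:

#include "absl/log/log.h"

void ProcessChunk(int index) {
  // Checked at runtime even when verbose logging is off.
  VLOG(1) << "processing chunk " << index;
  // Behaves like VLOG(2) in debug builds; compiles away under NDEBUG.
  DVLOG(2) << "chunk " << index << " internals";
}

int main() {
  ProcessChunk(0);
  return 0;
}
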
diff --git a/absl/log/log_basic_test.cc b/absl/log/log_basic_test.cc
index 7fc7111..ef8967a 100644
--- a/absl/log/log_basic_test.cc
+++ b/absl/log/log_basic_test.cc
@@ -16,6 +16,7 @@
 #include "absl/log/log.h"
 
 #define ABSL_TEST_LOG LOG
+#define ABSL_TEST_DLOG DLOG
 
 #include "gtest/gtest.h"
 #include "absl/log/log_basic_test_impl.inc"
diff --git a/absl/log/log_basic_test_impl.inc b/absl/log/log_basic_test_impl.inc
index e2f3356..7baf5e7 100644
--- a/absl/log/log_basic_test_impl.inc
+++ b/absl/log/log_basic_test_impl.inc
@@ -25,6 +25,10 @@
 #error ABSL_TEST_LOG must be defined for these tests to work.
 #endif
 
+#ifndef ABSL_TEST_DLOG
+#error ABSL_TEST_DLOG must be defined for these tests to work.
+#endif
+
 #include <cerrno>
 #include <sstream>
 #include <string>
@@ -34,6 +38,7 @@
 #include "absl/base/internal/sysinfo.h"
 #include "absl/base/log_severity.h"
 #include "absl/log/globals.h"
+#include "absl/log/internal/globals.h"
 #include "absl/log/internal/test_actions.h"
 #include "absl/log/internal/test_helpers.h"
 #include "absl/log/internal/test_matchers.h"
@@ -48,6 +53,7 @@
 using ::absl::log_internal::DiedOfFatal;
 using ::absl::log_internal::DiedOfQFatal;
 #endif
+using ::absl::log_internal::InMatchWindow;
 using ::absl::log_internal::LoggingEnabledAt;
 using ::absl::log_internal::LogSeverity;
 using ::absl::log_internal::Prefix;
@@ -57,7 +63,7 @@
 using ::absl::log_internal::Stacktrace;
 using ::absl::log_internal::TextMessage;
 using ::absl::log_internal::ThreadID;
-using ::absl::log_internal::TimestampInMatchWindow;
+using ::absl::log_internal::Timestamp;
 using ::absl::log_internal::Verbosity;
 using ::testing::AnyNumber;
 using ::testing::Eq;
@@ -93,18 +99,20 @@
   if (LoggingEnabledAt(absl::LogSeverity::kInfo)) {
     EXPECT_CALL(
         test_sink,
-        Send(AllOf(SourceFilename(Eq(__FILE__)),
-                   SourceBasename(Eq("log_basic_test_impl.inc")),
-                   SourceLine(Eq(log_line)), Prefix(IsTrue()),
-                   LogSeverity(Eq(absl::LogSeverity::kInfo)),
-                   TimestampInMatchWindow(),
-                   ThreadID(Eq(absl::base_internal::GetTID())),
-                   TextMessage(Eq("hello world")),
-                   Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)),
-                   ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                      literal: "hello world"
-                                                    })pb")),
-                   Stacktrace(IsEmpty()))));
+        Send(AllOf(
+            SourceFilename(Eq(__FILE__)),
+            SourceBasename(Eq("log_basic_test_impl.inc")),
+            SourceLine(Eq(log_line)), Prefix(IsTrue()),
+            LogSeverity(Eq(absl::LogSeverity::kInfo)),
+            Timestamp(InMatchWindow()),
+            ThreadID(Eq(absl::base_internal::GetTID())),
+            TextMessage(Eq("hello world")),
+            Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)),
+            ENCODED_MESSAGE(MatchesEvent(
+                Eq(__FILE__), Eq(log_line), InMatchWindow(),
+                Eq(logging::proto::INFO), Eq(absl::base_internal::GetTID()),
+                ElementsAre(EqualsProto(R"pb(literal: "hello world")pb")))),
+            Stacktrace(IsEmpty()))));
   }
 
   test_sink.StartCapturingLogs();
@@ -122,18 +130,20 @@
   if (LoggingEnabledAt(absl::LogSeverity::kWarning)) {
     EXPECT_CALL(
         test_sink,
-        Send(AllOf(SourceFilename(Eq(__FILE__)),
-                   SourceBasename(Eq("log_basic_test_impl.inc")),
-                   SourceLine(Eq(log_line)), Prefix(IsTrue()),
-                   LogSeverity(Eq(absl::LogSeverity::kWarning)),
-                   TimestampInMatchWindow(),
-                   ThreadID(Eq(absl::base_internal::GetTID())),
-                   TextMessage(Eq("hello world")),
-                   Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)),
-                   ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                      literal: "hello world"
-                                                    })pb")),
-                   Stacktrace(IsEmpty()))));
+        Send(AllOf(
+            SourceFilename(Eq(__FILE__)),
+            SourceBasename(Eq("log_basic_test_impl.inc")),
+            SourceLine(Eq(log_line)), Prefix(IsTrue()),
+            LogSeverity(Eq(absl::LogSeverity::kWarning)),
+            Timestamp(InMatchWindow()),
+            ThreadID(Eq(absl::base_internal::GetTID())),
+            TextMessage(Eq("hello world")),
+            Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)),
+            ENCODED_MESSAGE(MatchesEvent(
+                Eq(__FILE__), Eq(log_line), InMatchWindow(),
+                Eq(logging::proto::WARNING), Eq(absl::base_internal::GetTID()),
+                ElementsAre(EqualsProto(R"pb(literal: "hello world")pb")))),
+            Stacktrace(IsEmpty()))));
   }
 
   test_sink.StartCapturingLogs();
@@ -151,18 +161,20 @@
   if (LoggingEnabledAt(absl::LogSeverity::kError)) {
     EXPECT_CALL(
         test_sink,
-        Send(AllOf(SourceFilename(Eq(__FILE__)),
-                   SourceBasename(Eq("log_basic_test_impl.inc")),
-                   SourceLine(Eq(log_line)), Prefix(IsTrue()),
-                   LogSeverity(Eq(absl::LogSeverity::kError)),
-                   TimestampInMatchWindow(),
-                   ThreadID(Eq(absl::base_internal::GetTID())),
-                   TextMessage(Eq("hello world")),
-                   Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)),
-                   ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                      literal: "hello world"
-                                                    })pb")),
-                   Stacktrace(IsEmpty()))));
+        Send(AllOf(
+            SourceFilename(Eq(__FILE__)),
+            SourceBasename(Eq("log_basic_test_impl.inc")),
+            SourceLine(Eq(log_line)), Prefix(IsTrue()),
+            LogSeverity(Eq(absl::LogSeverity::kError)),
+            Timestamp(InMatchWindow()),
+            ThreadID(Eq(absl::base_internal::GetTID())),
+            TextMessage(Eq("hello world")),
+            Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)),
+            ENCODED_MESSAGE(MatchesEvent(
+                Eq(__FILE__), Eq(log_line), InMatchWindow(),
+                Eq(logging::proto::ERROR), Eq(absl::base_internal::GetTID()),
+                ElementsAre(EqualsProto(R"pb(literal: "hello world")pb")))),
+            Stacktrace(IsEmpty()))));
   }
 
   test_sink.StartCapturingLogs();
@@ -206,12 +218,16 @@
                          SourceBasename(Eq("log_basic_test_impl.inc")),
                          SourceLine(Eq(log_line)), Prefix(IsTrue()),
                          LogSeverity(Eq(absl::LogSeverity::kFatal)),
-                         TimestampInMatchWindow(),
+                         Timestamp(InMatchWindow()),
                          ThreadID(Eq(absl::base_internal::GetTID())),
                          TextMessage(Eq("hello world")),
                          Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)),
-                         ENCODED_MESSAGE(EqualsProto(
-                             R"pb(value { literal: "hello world" })pb")),
+                         ENCODED_MESSAGE(MatchesEvent(
+                             Eq(__FILE__), Eq(log_line), InMatchWindow(),
+                             Eq(logging::proto::FATAL),
+                             Eq(absl::base_internal::GetTID()),
+                             ElementsAre(EqualsProto(
+                                 R"pb(literal: "hello world")pb")))),
                          Stacktrace(IsEmpty()))))
               .WillOnce(DeathTestExpectedLogging());
 
@@ -222,12 +238,16 @@
                          SourceBasename(Eq("log_basic_test_impl.inc")),
                          SourceLine(Eq(log_line)), Prefix(IsTrue()),
                          LogSeverity(Eq(absl::LogSeverity::kFatal)),
-                         TimestampInMatchWindow(),
+                         Timestamp(InMatchWindow()),
                          ThreadID(Eq(absl::base_internal::GetTID())),
                          TextMessage(Eq("hello world")),
                          Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)),
-                         ENCODED_MESSAGE(EqualsProto(
-                             R"pb(value { literal: "hello world" })pb")),
+                         ENCODED_MESSAGE(MatchesEvent(
+                             Eq(__FILE__), Eq(log_line), InMatchWindow(),
+                             Eq(logging::proto::FATAL),
+                             Eq(absl::base_internal::GetTID()),
+                             ElementsAre(EqualsProto(
+                                 R"pb(literal: "hello world")pb")))),
                          Stacktrace(Not(IsEmpty())))))
               .WillOnce(DeathTestExpectedLogging());
         }
@@ -260,12 +280,16 @@
                          SourceBasename(Eq("log_basic_test_impl.inc")),
                          SourceLine(Eq(log_line)), Prefix(IsTrue()),
                          LogSeverity(Eq(absl::LogSeverity::kFatal)),
-                         TimestampInMatchWindow(),
+                         Timestamp(InMatchWindow()),
                          ThreadID(Eq(absl::base_internal::GetTID())),
                          TextMessage(Eq("hello world")),
                          Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)),
-                         ENCODED_MESSAGE(EqualsProto(
-                             R"pb(value { literal: "hello world" })pb")),
+                         ENCODED_MESSAGE(MatchesEvent(
+                             Eq(__FILE__), Eq(log_line), InMatchWindow(),
+                             Eq(logging::proto::FATAL),
+                             Eq(absl::base_internal::GetTID()),
+                             ElementsAre(EqualsProto(
+                                 R"pb(literal: "hello world")pb")))),
                          Stacktrace(IsEmpty()))))
               .WillOnce(DeathTestExpectedLogging());
         }
@@ -289,18 +313,20 @@
   if (LoggingEnabledAt(absl::LogSeverity::kError)) {
     EXPECT_CALL(
         test_sink,
-        Send(AllOf(SourceFilename(Eq(__FILE__)),
-                   SourceBasename(Eq("log_basic_test_impl.inc")),
-                   SourceLine(Eq(log_line)), Prefix(IsTrue()),
-                   LogSeverity(Eq(absl::LogSeverity::kError)),
-                   TimestampInMatchWindow(),
-                   ThreadID(Eq(absl::base_internal::GetTID())),
-                   TextMessage(Eq("hello world")),
-                   Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)),
-                   ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                      literal: "hello world"
-                                                    })pb")),
-                   Stacktrace(IsEmpty()))));
+        Send(AllOf(
+            SourceFilename(Eq(__FILE__)),
+            SourceBasename(Eq("log_basic_test_impl.inc")),
+            SourceLine(Eq(log_line)), Prefix(IsTrue()),
+            LogSeverity(Eq(absl::LogSeverity::kError)),
+            Timestamp(InMatchWindow()),
+            ThreadID(Eq(absl::base_internal::GetTID())),
+            TextMessage(Eq("hello world")),
+            Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)),
+            ENCODED_MESSAGE(MatchesEvent(
+                Eq(__FILE__), Eq(log_line), InMatchWindow(),
+                Eq(logging::proto::ERROR), Eq(absl::base_internal::GetTID()),
+                ElementsAre(EqualsProto(R"pb(literal: "hello world")pb")))),
+            Stacktrace(IsEmpty()))));
   }
 
   test_sink.StartCapturingLogs();
@@ -334,12 +360,16 @@
                          SourceBasename(Eq("log_basic_test_impl.inc")),
                          SourceLine(Eq(log_line)), Prefix(IsTrue()),
                          LogSeverity(Eq(absl::LogSeverity::kFatal)),
-                         TimestampInMatchWindow(),
+                         Timestamp(InMatchWindow()),
                          ThreadID(Eq(absl::base_internal::GetTID())),
                          TextMessage(Eq("hello world")),
                          Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)),
-                         ENCODED_MESSAGE(EqualsProto(
-                             R"pb(value { literal: "hello world" })pb")),
+                         ENCODED_MESSAGE(MatchesEvent(
+                             Eq(__FILE__), Eq(log_line), InMatchWindow(),
+                             Eq(logging::proto::FATAL),
+                             Eq(absl::base_internal::GetTID()),
+                             ElementsAre(EqualsProto(
+                                 R"pb(literal: "hello world")pb")))),
                          Stacktrace(IsEmpty()))))
               .WillOnce(DeathTestExpectedLogging());
 
@@ -350,12 +380,16 @@
                          SourceBasename(Eq("log_basic_test_impl.inc")),
                          SourceLine(Eq(log_line)), Prefix(IsTrue()),
                          LogSeverity(Eq(absl::LogSeverity::kFatal)),
-                         TimestampInMatchWindow(),
+                         Timestamp(InMatchWindow()),
                          ThreadID(Eq(absl::base_internal::GetTID())),
                          TextMessage(Eq("hello world")),
                          Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)),
-                         ENCODED_MESSAGE(EqualsProto(
-                             R"pb(value { literal: "hello world" })pb")),
+                         ENCODED_MESSAGE(MatchesEvent(
+                             Eq(__FILE__), Eq(log_line), InMatchWindow(),
+                             Eq(logging::proto::FATAL),
+                             Eq(absl::base_internal::GetTID()),
+                             ElementsAre(EqualsProto(
+                                 R"pb(literal: "hello world")pb")))),
                          Stacktrace(Not(IsEmpty())))))
               .WillOnce(DeathTestExpectedLogging());
         }
@@ -367,6 +401,27 @@
 }
 #endif
 
+#ifndef NDEBUG
+TEST_P(BasicLogTest, DFatalIsCancellable) {
+  // LOG(DFATAL) does not die when DFATAL death is disabled.
+  absl::log_internal::SetExitOnDFatal(false);
+  ABSL_TEST_LOG(DFATAL) << "hello world";
+  absl::log_internal::SetExitOnDFatal(true);
+}
+
+#if GTEST_HAS_DEATH_TEST
+TEST_P(BasicLogDeathTest, DLogFatalIsNotCancellable) {
+  EXPECT_EXIT(
+      {
+        absl::log_internal::SetExitOnDFatal(false);
+        ABSL_TEST_DLOG(FATAL) << "hello world";
+        absl::log_internal::SetExitOnDFatal(true);
+      },
+      DiedOfFatal, "");
+}
+#endif
+#endif
+
 TEST_P(BasicLogTest, Level) {
   absl::log_internal::ScopedMinLogLevel scoped_min_log_level(GetParam());
 
@@ -382,17 +437,25 @@
     if (LoggingEnabledAt(severity)) {
       EXPECT_CALL(
           test_sink,
-          Send(AllOf(SourceFilename(Eq(__FILE__)),
-                     SourceBasename(Eq("log_basic_test_impl.inc")),
-                     SourceLine(Eq(log_line)), Prefix(IsTrue()),
-                     LogSeverity(Eq(severity)), TimestampInMatchWindow(),
-                     ThreadID(Eq(absl::base_internal::GetTID())),
-                     TextMessage(Eq("hello world")),
-                     Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)),
-                     ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                        literal: "hello world"
-                                                      })pb")),
-                     Stacktrace(IsEmpty()))));
+          Send(AllOf(
+              SourceFilename(Eq(__FILE__)),
+              SourceBasename(Eq("log_basic_test_impl.inc")),
+              SourceLine(Eq(log_line)), Prefix(IsTrue()),
+              LogSeverity(Eq(severity)), Timestamp(InMatchWindow()),
+              ThreadID(Eq(absl::base_internal::GetTID())),
+              TextMessage(Eq("hello world")),
+              Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)),
+              ENCODED_MESSAGE(MatchesEvent(
+                  Eq(__FILE__), Eq(log_line), InMatchWindow(),
+                  Eq(severity == absl::LogSeverity::kInfo ? logging::proto::INFO
+                     : severity == absl::LogSeverity::kWarning
+                         ? logging::proto::WARNING
+                     : severity == absl::LogSeverity::kError
+                         ? logging::proto::ERROR
+                         : 0),
+                  Eq(absl::base_internal::GetTID()),
+                  ElementsAre(EqualsProto(R"pb(literal: "hello world")pb")))),
+              Stacktrace(IsEmpty()))));
     }
     test_sink.StartCapturingLogs();
     do_log();
@@ -429,12 +492,16 @@
                          SourceBasename(Eq("log_basic_test_impl.inc")),
                          SourceLine(Eq(log_line)), Prefix(IsTrue()),
                          LogSeverity(Eq(absl::LogSeverity::kFatal)),
-                         TimestampInMatchWindow(),
+                         Timestamp(InMatchWindow()),
                          ThreadID(Eq(absl::base_internal::GetTID())),
                          TextMessage(Eq("hello world")),
                          Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)),
-                         ENCODED_MESSAGE(EqualsProto(
-                             R"pb(value { literal: "hello world" })pb")),
+                         ENCODED_MESSAGE(MatchesEvent(
+                             Eq(__FILE__), Eq(log_line), InMatchWindow(),
+                             Eq(logging::proto::FATAL),
+                             Eq(absl::base_internal::GetTID()),
+                             ElementsAre(EqualsProto(
+                                 R"pb(literal: "hello world")pb")))),
                          Stacktrace(IsEmpty()))))
               .WillOnce(DeathTestExpectedLogging());
 
@@ -444,12 +511,16 @@
                          SourceBasename(Eq("log_basic_test_impl.inc")),
                          SourceLine(Eq(log_line)), Prefix(IsTrue()),
                          LogSeverity(Eq(absl::LogSeverity::kFatal)),
-                         TimestampInMatchWindow(),
+                         Timestamp(InMatchWindow()),
                          ThreadID(Eq(absl::base_internal::GetTID())),
                          TextMessage(Eq("hello world")),
                          Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)),
-                         ENCODED_MESSAGE(EqualsProto(
-                             R"pb(value { literal: "hello world" })pb")),
+                         ENCODED_MESSAGE(MatchesEvent(
+                             Eq(__FILE__), Eq(log_line), InMatchWindow(),
+                             Eq(logging::proto::FATAL),
+                             Eq(absl::base_internal::GetTID()),
+                             ElementsAre(EqualsProto(
+                                 R"pb(literal: "hello world")pb")))),
                          Stacktrace(Not(IsEmpty())))))
               .WillOnce(DeathTestExpectedLogging());
         }
diff --git a/absl/log/log_benchmark.cc b/absl/log/log_benchmark.cc
index 45d9a5d..60c0fd6 100644
--- a/absl/log/log_benchmark.cc
+++ b/absl/log/log_benchmark.cc
@@ -17,10 +17,12 @@
 #include "absl/flags/flag.h"
 #include "absl/log/check.h"
 #include "absl/log/globals.h"
+#include "absl/log/internal/flags.h"
 #include "absl/log/log.h"
 #include "absl/log/log_entry.h"
 #include "absl/log/log_sink.h"
 #include "absl/log/log_sink_registry.h"
+#include "absl/log/vlog_is_on.h"
 #include "benchmark/benchmark.h"
 
 namespace {
@@ -93,5 +95,70 @@
 }
 BENCHMARK(BM_EnabledLogOverhead);
 
+static void BM_VlogIsOnOverhead(benchmark::State& state) {
+  // It would make sense to do this only when state.thread_index == 0,
+  // but thread_index is an int on some platforms (e.g. Android) and a
+  // function returning an int on others. So we just do it on all threads.
+  // TODO(b/152609127): set only if thread_index == 0.
+  absl::SetFlag(&FLAGS_v, 0);
+
+  while (state.KeepRunningBatch(10)) {
+    benchmark::DoNotOptimize(VLOG_IS_ON(0));  // 1
+    benchmark::DoNotOptimize(VLOG_IS_ON(0));  // 2
+    benchmark::DoNotOptimize(VLOG_IS_ON(0));  // 3
+    benchmark::DoNotOptimize(VLOG_IS_ON(0));  // 4
+    benchmark::DoNotOptimize(VLOG_IS_ON(0));  // 5
+    benchmark::DoNotOptimize(VLOG_IS_ON(0));  // 6
+    benchmark::DoNotOptimize(VLOG_IS_ON(0));  // 7
+    benchmark::DoNotOptimize(VLOG_IS_ON(0));  // 8
+    benchmark::DoNotOptimize(VLOG_IS_ON(0));  // 9
+    benchmark::DoNotOptimize(VLOG_IS_ON(0));  // 10
+  }
+}
+BENCHMARK(BM_VlogIsOnOverhead)->ThreadRange(1, 64);
+
+static void BM_VlogIsNotOnOverhead(benchmark::State& state) {
+  // It would make sense to do this only when state.thread_index == 0,
+  // but thread_index is an int on some platforms (e.g. Android) and a
+  // function returning an int on others. So we just do it on all threads.
+  // TODO(b/152609127): set only if thread_index == 0.
+  absl::SetFlag(&FLAGS_v, 0);
+
+  while (state.KeepRunningBatch(10)) {
+    benchmark::DoNotOptimize(VLOG_IS_ON(1));  // 1
+    benchmark::DoNotOptimize(VLOG_IS_ON(1));  // 2
+    benchmark::DoNotOptimize(VLOG_IS_ON(1));  // 3
+    benchmark::DoNotOptimize(VLOG_IS_ON(1));  // 4
+    benchmark::DoNotOptimize(VLOG_IS_ON(1));  // 5
+    benchmark::DoNotOptimize(VLOG_IS_ON(1));  // 6
+    benchmark::DoNotOptimize(VLOG_IS_ON(1));  // 7
+    benchmark::DoNotOptimize(VLOG_IS_ON(1));  // 8
+    benchmark::DoNotOptimize(VLOG_IS_ON(1));  // 9
+    benchmark::DoNotOptimize(VLOG_IS_ON(1));  // 10
+  }
+}
+BENCHMARK(BM_VlogIsNotOnOverhead)->ThreadRange(1, 64);
+
+static void BM_LogEveryNOverhead(benchmark::State& state) {
+  absl::ScopedStderrThreshold disable_stderr_logging(
+      absl::LogSeverityAtLeast::kInfinity);
+  absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfinity);
+  ABSL_ATTRIBUTE_UNUSED NullLogSink null_sink;
+
+  while (state.KeepRunningBatch(10)) {
+    LOG_EVERY_N_SEC(INFO, 10);
+    LOG_EVERY_N_SEC(INFO, 20);
+    LOG_EVERY_N_SEC(INFO, 30);
+    LOG_EVERY_N_SEC(INFO, 40);
+    LOG_EVERY_N_SEC(INFO, 50);
+    LOG_EVERY_N_SEC(INFO, 60);
+    LOG_EVERY_N_SEC(INFO, 70);
+    LOG_EVERY_N_SEC(INFO, 80);
+    LOG_EVERY_N_SEC(INFO, 90);
+    LOG_EVERY_N_SEC(INFO, 100);
+  }
+}
+BENCHMARK(BM_LogEveryNOverhead)->ThreadRange(1, 64);
+
 }  // namespace
 
diff --git a/absl/log/log_format_test.cc b/absl/log/log_format_test.cc
index dbad5d9..beee966 100644
--- a/absl/log/log_format_test.cc
+++ b/absl/log/log_format_test.cc
@@ -78,7 +78,7 @@
                          TextPrefix(AsString(EndsWith(absl::StrCat(
                              " log_format_test.cc:", log_line, "] ")))),
                          TextMessage(IsEmpty()),
-                         ENCODED_MESSAGE(EqualsProto(R"pb()pb")))));
+                         ENCODED_MESSAGE(HasValues(IsEmpty())))));
 
   test_sink.StartCapturingLogs();
   do_log();
@@ -96,11 +96,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << value;
 
-  EXPECT_CALL(
-      test_sink,
-      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                 TextMessage(Eq("x")),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "x" })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("x")),
+                         ENCODED_MESSAGE(HasValues(
+                             ElementsAre(EqualsProto(R"pb(str: "x")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
@@ -113,12 +113,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << value;
 
-  EXPECT_CALL(
-      test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                            TextMessage(Eq("\xee")),
-                            ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                               str: "\xee"
-                                                             })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("\xee")),
+                         ENCODED_MESSAGE(HasValues(ElementsAre(
+                             EqualsProto(R"pb(str: "\xee")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
@@ -137,11 +136,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << value;
 
-  EXPECT_CALL(
-      test_sink,
-      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                 TextMessage(Eq("224")),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "224" })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("224")),
+                         ENCODED_MESSAGE(HasValues(
+                             ElementsAre(EqualsProto(R"pb(str: "224")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
@@ -156,11 +155,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << value.bits;
 
-  EXPECT_CALL(
-      test_sink,
-      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                 TextMessage(Eq("42")),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "42" })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("42")),
+                         ENCODED_MESSAGE(HasValues(
+                             ElementsAre(EqualsProto(R"pb(str: "42")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value.bits;
@@ -179,11 +178,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << value;
 
-  EXPECT_CALL(
-      test_sink,
-      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                 TextMessage(Eq("224")),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "224" })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("224")),
+                         ENCODED_MESSAGE(HasValues(
+                             ElementsAre(EqualsProto(R"pb(str: "224")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
@@ -196,12 +195,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << value;
 
-  EXPECT_CALL(
-      test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                            TextMessage(Eq("-112")),
-                            ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                               str: "-112"
-                                                             })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("-112")),
+                         ENCODED_MESSAGE(HasValues(ElementsAre(
+                             EqualsProto(R"pb(str: "-112")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
@@ -216,11 +214,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << value.bits;
 
-  EXPECT_CALL(
-      test_sink,
-      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                 TextMessage(Eq("21")),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "21" })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("21")),
+                         ENCODED_MESSAGE(HasValues(
+                             ElementsAre(EqualsProto(R"pb(str: "21")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value.bits;
@@ -235,11 +233,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << value.bits;
 
-  EXPECT_CALL(
-      test_sink,
-      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                 TextMessage(Eq("-21")),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "-21" })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("-21")),
+                         ENCODED_MESSAGE(HasValues(
+                             ElementsAre(EqualsProto(R"pb(str: "-21")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value.bits;
@@ -276,11 +274,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << value;
 
-  EXPECT_CALL(
-      test_sink,
-      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                 TextMessage(Eq("224")),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "224" })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("224")),
+                         ENCODED_MESSAGE(HasValues(
+                             ElementsAre(EqualsProto(R"pb(str: "224")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
@@ -295,11 +293,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << value.bits;
 
-  EXPECT_CALL(
-      test_sink,
-      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                 TextMessage(Eq("42")),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "42" })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("42")),
+                         ENCODED_MESSAGE(HasValues(
+                             ElementsAre(EqualsProto(R"pb(str: "42")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value.bits;
@@ -335,11 +333,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << value;
 
-  EXPECT_CALL(
-      test_sink,
-      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                 TextMessage(Eq("224")),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "224" })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("224")),
+                         ENCODED_MESSAGE(HasValues(
+                             ElementsAre(EqualsProto(R"pb(str: "224")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
@@ -352,12 +350,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << value;
 
-  EXPECT_CALL(
-      test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                            TextMessage(Eq("-112")),
-                            ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                               str: "-112"
-                                                             })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("-112")),
+                         ENCODED_MESSAGE(HasValues(ElementsAre(
+                             EqualsProto(R"pb(str: "-112")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
@@ -372,11 +369,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << value.bits;
 
-  EXPECT_CALL(
-      test_sink,
-      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                 TextMessage(Eq("21")),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "21" })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("21")),
+                         ENCODED_MESSAGE(HasValues(
+                             ElementsAre(EqualsProto(R"pb(str: "21")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value.bits;
@@ -391,11 +388,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << value.bits;
 
-  EXPECT_CALL(
-      test_sink,
-      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                 TextMessage(Eq("-21")),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "-21" })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("-21")),
+                         ENCODED_MESSAGE(HasValues(
+                             ElementsAre(EqualsProto(R"pb(str: "-21")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value.bits;
@@ -412,9 +409,8 @@
   EXPECT_CALL(test_sink,
               Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
                          TextMessage(Eq("6.02e+23")),
-                         ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                            str: "6.02e+23"
-                                                          })pb")))));
+                         ENCODED_MESSAGE(HasValues(ElementsAre(
+                             EqualsProto(R"pb(str: "6.02e+23")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
@@ -430,9 +426,8 @@
   EXPECT_CALL(test_sink,
               Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
                          TextMessage(Eq("-6.02e+23")),
-                         ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                            str: "-6.02e+23"
-                                                          })pb")))));
+                         ENCODED_MESSAGE(HasValues(ElementsAre(
+                             EqualsProto(R"pb(str: "-6.02e+23")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
@@ -448,9 +443,8 @@
   EXPECT_CALL(test_sink,
               Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
                          TextMessage(Eq("6.02e-23")),
-                         ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                            str: "6.02e-23"
-                                                          })pb")))));
+                         ENCODED_MESSAGE(HasValues(ElementsAre(
+                             EqualsProto(R"pb(str: "6.02e-23")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
@@ -466,9 +460,8 @@
   EXPECT_CALL(test_sink,
               Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
                          TextMessage(Eq("6.02e+23")),
-                         ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                            str: "6.02e+23"
-                                                          })pb")))));
+                         ENCODED_MESSAGE(HasValues(ElementsAre(
+                             EqualsProto(R"pb(str: "6.02e+23")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
@@ -484,9 +477,8 @@
   EXPECT_CALL(test_sink,
               Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
                          TextMessage(Eq("-6.02e+23")),
-                         ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                            str: "-6.02e+23"
-                                                          })pb")))));
+                         ENCODED_MESSAGE(HasValues(ElementsAre(
+                             EqualsProto(R"pb(str: "-6.02e+23")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
@@ -502,9 +494,8 @@
   EXPECT_CALL(test_sink,
               Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
                          TextMessage(Eq("6.02e-23")),
-                         ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                            str: "6.02e-23"
-                                                          })pb")))));
+                         ENCODED_MESSAGE(HasValues(ElementsAre(
+                             EqualsProto(R"pb(str: "6.02e-23")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
@@ -522,11 +513,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << value;
 
-  EXPECT_CALL(
-      test_sink,
-      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                 TextMessage(Eq("0")),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "0" })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("0")),
+                         ENCODED_MESSAGE(HasValues(
+                             ElementsAre(EqualsProto(R"pb(str: "0")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
@@ -539,11 +530,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << value;
 
-  EXPECT_CALL(
-      test_sink,
-      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                 TextMessage(Eq("1")),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "1" })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("1")),
+                         ENCODED_MESSAGE(HasValues(
+                             ElementsAre(EqualsProto(R"pb(str: "1")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
@@ -556,11 +547,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << value;
 
-  EXPECT_CALL(
-      test_sink,
-      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                 TextMessage(AnyOf(Eq("inf"), Eq("Inf"))),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "inf" })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(AnyOf(Eq("inf"), Eq("Inf"))),
+                         ENCODED_MESSAGE(HasValues(
+                             ElementsAre(EqualsProto(R"pb(str: "inf")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
@@ -573,12 +564,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << value;
 
-  EXPECT_CALL(
-      test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                            TextMessage(AnyOf(Eq("-inf"), Eq("-Inf"))),
-                            ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                               str: "-inf"
-                                                             })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(AnyOf(Eq("-inf"), Eq("-Inf"))),
+                         ENCODED_MESSAGE(HasValues(ElementsAre(
+                             EqualsProto(R"pb(str: "-inf")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
@@ -591,11 +581,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << value;
 
-  EXPECT_CALL(
-      test_sink,
-      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                 TextMessage(AnyOf(Eq("nan"), Eq("NaN"))),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "nan" })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(AnyOf(Eq("nan"), Eq("NaN"))),
+                         ENCODED_MESSAGE(HasValues(
+                             ElementsAre(EqualsProto(R"pb(str: "nan")pb")))))));
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
 }
@@ -608,15 +598,29 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << value;
 
+  // On RISC-V, don't expect that formatting -NaN produces the same string as
+  // streaming it. #ifdefing out just the relevant line breaks the MSVC build,
+  // so duplicate the entire EXPECT_CALL.
+#ifdef __riscv
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(
+          TextMessage(AnyOf(Eq("-nan"), Eq("nan"), Eq("NaN"), Eq("-nan(ind)"))),
+          ENCODED_MESSAGE(HasValues(
+              ElementsAre(AnyOf(EqualsProto(R"pb(str: "-nan")pb"),
+                                EqualsProto(R"pb(str: "nan")pb"),
+                                EqualsProto(R"pb(str: "-nan(ind)")pb"))))))));
+#else
   EXPECT_CALL(
       test_sink,
       Send(AllOf(
           TextMessage(MatchesOstream(comparison_stream)),
           TextMessage(AnyOf(Eq("-nan"), Eq("nan"), Eq("NaN"), Eq("-nan(ind)"))),
-          ENCODED_MESSAGE(
-              AnyOf(EqualsProto(R"pb(value { str: "-nan" })pb"),
-                    EqualsProto(R"pb(value { str: "nan" })pb"),
-                    EqualsProto(R"pb(value { str: "-nan(ind)" })pb"))))));
+          ENCODED_MESSAGE(HasValues(
+              ElementsAre(AnyOf(EqualsProto(R"pb(str: "-nan")pb"),
+                                EqualsProto(R"pb(str: "nan")pb"),
+                                EqualsProto(R"pb(str: "-nan(ind)")pb"))))))));
+#endif
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
 }
@@ -652,13 +656,12 @@
 
   EXPECT_CALL(
       test_sink,
-      Send(AllOf(
-          TextMessage(MatchesOstream(comparison_stream)),
-          TextMessage(
-              AnyOf(Eq("0xdeadbeef"), Eq("DEADBEEF"), Eq("00000000DEADBEEF"))),
-          ENCODED_MESSAGE(AnyOf(
-              EqualsProto(R"pb(value { str: "0xdeadbeef" })pb"),
-              EqualsProto(R"pb(value { str: "00000000DEADBEEF" })pb"))))));
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(AnyOf(Eq("0xdeadbeef"), Eq("DEADBEEF"),
+                                   Eq("00000000DEADBEEF"))),
+                 ENCODED_MESSAGE(HasValues(ElementsAre(
+                     AnyOf(EqualsProto(R"pb(str: "0xdeadbeef")pb"),
+                           EqualsProto(R"pb(str: "00000000DEADBEEF")pb"))))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
@@ -680,12 +683,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << value;
 
-  EXPECT_CALL(
-      test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                            TextMessage(Eq("false")),
-                            ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                               str: "false"
-                                                             })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("false")),
+                         ENCODED_MESSAGE(HasValues(ElementsAre(
+                             EqualsProto(R"pb(str: "false")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
@@ -698,12 +700,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << value;
 
-  EXPECT_CALL(
-      test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                            TextMessage(Eq("true")),
-                            ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                               str: "true"
-                                                             })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("true")),
+                         ENCODED_MESSAGE(HasValues(ElementsAre(
+                             EqualsProto(R"pb(str: "true")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
@@ -729,7 +730,8 @@
       Send(AllOf(
          // `MatchesOstream` deliberately omitted since the logged text intentionally differs from the streamed text.
           TextMessage(Eq("(null)")),
-          ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "(null)" })pb")))));
+          ENCODED_MESSAGE(
+              HasValues(ElementsAre(EqualsProto(R"pb(str: "(null)")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
@@ -743,12 +745,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << value;
 
-  EXPECT_CALL(
-      test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                            TextMessage(Eq("value")),
-                            ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                               str: "value"
-                                                             })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("value")),
+                         ENCODED_MESSAGE(HasValues(ElementsAre(
+                             EqualsProto(R"pb(str: "value")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
@@ -761,12 +762,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << value;
 
-  EXPECT_CALL(
-      test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                            TextMessage(Eq("true")),
-                            ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                               str: "true"
-                                                             })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("true")),
+                         ENCODED_MESSAGE(HasValues(ElementsAre(
+                             EqualsProto(R"pb(str: "true")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
@@ -779,12 +779,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << value;
 
-  EXPECT_CALL(
-      test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                            TextMessage(Eq("false")),
-                            ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                               str: "false"
-                                                             })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("false")),
+                         ENCODED_MESSAGE(HasValues(ElementsAre(
+                             EqualsProto(R"pb(str: "false")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
@@ -799,9 +798,8 @@
   EXPECT_CALL(test_sink,
               Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
                          TextMessage(Eq("value")),
-                         ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                            literal: "value"
-                                                          })pb")))));
+                         ENCODED_MESSAGE(HasValues(ElementsAre(
+                             EqualsProto(R"pb(literal: "value")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << "value";
@@ -814,12 +812,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << value;
 
-  EXPECT_CALL(
-      test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                            TextMessage(Eq("value")),
-                            ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                               str: "value"
-                                                             })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("value")),
+                         ENCODED_MESSAGE(HasValues(ElementsAre(
+                             EqualsProto(R"pb(str: "value")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
@@ -840,9 +837,8 @@
   EXPECT_CALL(test_sink,
               Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
                          TextMessage(Eq("CustomClass{}")),
-                         ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                            str: "CustomClass{}"
-                                                          })pb")))));
+                         ENCODED_MESSAGE(HasValues(ElementsAre(
+                             EqualsProto(R"pb(str: "CustomClass{}")pb")))))));
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
 }
@@ -864,12 +860,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << value;
 
-  EXPECT_CALL(
-      test_sink,
-      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                 TextMessage(Eq("CustomClassNonCopyable{}")),
-                 ENCODED_MESSAGE(EqualsProto(
-                     R"pb(value { str: "CustomClassNonCopyable{}" })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("CustomClassNonCopyable{}")),
+                         ENCODED_MESSAGE(HasValues(ElementsAre(EqualsProto(
+                             R"pb(str: "CustomClassNonCopyable{}")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
@@ -892,9 +887,9 @@
 
   EXPECT_CALL(
       test_sink,
-      Send(AllOf(
-          TextMessage(Eq("(10, 20)")), TextMessage(Eq(absl::StrCat(p))),
-          ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "(10, 20)" })pb")))));
+      Send(AllOf(TextMessage(Eq("(10, 20)")), TextMessage(Eq(absl::StrCat(p))),
+                 ENCODED_MESSAGE(HasValues(
+                     ElementsAre(EqualsProto(R"pb(str: "(10, 20)")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << p;
@@ -923,9 +918,9 @@
 
   EXPECT_CALL(
       test_sink,
-      Send(AllOf(
-          TextMessage(Eq("(10, 20)")), TextMessage(Eq(absl::StrCat(p))),
-          ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "(10, 20)" })pb")))));
+      Send(AllOf(TextMessage(Eq("(10, 20)")), TextMessage(Eq(absl::StrCat(p))),
+                 ENCODED_MESSAGE(HasValues(
+                     ElementsAre(EqualsProto(R"pb(str: "(10, 20)")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << p;
@@ -944,10 +939,10 @@
 
   PointStreamsNothing p;
 
-  EXPECT_CALL(
-      test_sink,
-      Send(AllOf(TextMessage(Eq("77")), TextMessage(Eq(absl::StrCat(p, 77))),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "77" })pb")))));
+  EXPECT_CALL(test_sink, Send(AllOf(TextMessage(Eq("77")),
+                                    TextMessage(Eq(absl::StrCat(p, 77))),
+                                    ENCODED_MESSAGE(HasValues(ElementsAre(
+                                        EqualsProto(R"pb(str: "77")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << p << 77;
@@ -971,10 +966,10 @@
 
   EXPECT_CALL(
       test_sink,
-      Send(AllOf(
-          TextMessage(Eq("(10, 20)")), TextMessage(Eq(absl::StrCat(p))),
-          ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "(" }
-                                           value { str: "10, 20)" })pb")))));
+      Send(AllOf(TextMessage(Eq("(10, 20)")), TextMessage(Eq(absl::StrCat(p))),
+                 ENCODED_MESSAGE(HasValues(
+                     ElementsAre(EqualsProto(R"pb(str: "(")pb"),
+                                 EqualsProto(R"pb(str: "10, 20)")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << p;
@@ -992,12 +987,12 @@
   EXPECT_CALL(test_sink,
               Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
                          TextMessage(Eq("1 true 1")),
-                         ENCODED_MESSAGE(EqualsProto(
-                             R"pb(value { str: "1" }
-                                  value { literal: " " }
-                                  value { str: "true" }
-                                  value { literal: " " }
-                                  value { str: "1" })pb")))));
+                         ENCODED_MESSAGE(HasValues(
+                             ElementsAre(EqualsProto(R"pb(str: "1")pb"),
+                                         EqualsProto(R"pb(literal: " ")pb"),
+                                         EqualsProto(R"pb(str: "true")pb"),
+                                         EqualsProto(R"pb(literal: " ")pb"),
+                                         EqualsProto(R"pb(str: "1")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << std::noboolalpha << value << " "  //
@@ -1017,12 +1012,12 @@
   EXPECT_CALL(test_sink,
               Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
                          TextMessage(Eq("0 false 0")),
-                         ENCODED_MESSAGE(EqualsProto(
-                             R"pb(value { str: "0" }
-                                  value { literal: " " }
-                                  value { str: "false" }
-                                  value { literal: " " }
-                                  value { str: "0" })pb")))));
+                         ENCODED_MESSAGE(HasValues(
+                             ElementsAre(EqualsProto(R"pb(str: "0")pb"),
+                                         EqualsProto(R"pb(literal: " ")pb"),
+                                         EqualsProto(R"pb(str: "false")pb"),
+                                         EqualsProto(R"pb(literal: " ")pb"),
+                                         EqualsProto(R"pb(str: "0")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << std::noboolalpha << value << " "  //
@@ -1039,15 +1034,15 @@
                     << std::showpoint << value << " "    //
                     << std::noshowpoint << value;
 
-  EXPECT_CALL(
-      test_sink,
-      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                 TextMessage(Eq("77 77.0000 77")),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "77" }
-                                                  value { literal: " " }
-                                                  value { str: "77.0000" }
-                                                  value { literal: " " }
-                                                  value { str: "77" })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("77 77.0000 77")),
+                         ENCODED_MESSAGE(HasValues(
+                             ElementsAre(EqualsProto(R"pb(str: "77")pb"),
+                                         EqualsProto(R"pb(literal: " ")pb"),
+                                         EqualsProto(R"pb(str: "77.0000")pb"),
+                                         EqualsProto(R"pb(literal: " ")pb"),
+                                         EqualsProto(R"pb(str: "77")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << std::noshowpoint << value << " "  //
@@ -1064,15 +1059,15 @@
                     << std::showpos << value << " "    //
                     << std::noshowpos << value;
 
-  EXPECT_CALL(
-      test_sink,
-      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                 TextMessage(Eq("77 +77 77")),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "77" }
-                                                  value { literal: " " }
-                                                  value { str: "+77" }
-                                                  value { literal: " " }
-                                                  value { str: "77" })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("77 +77 77")),
+                         ENCODED_MESSAGE(HasValues(
+                             ElementsAre(EqualsProto(R"pb(str: "77")pb"),
+                                         EqualsProto(R"pb(literal: " ")pb"),
+                                         EqualsProto(R"pb(str: "+77")pb"),
+                                         EqualsProto(R"pb(literal: " ")pb"),
+                                         EqualsProto(R"pb(str: "77")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << std::noshowpos << value << " "  //
@@ -1092,12 +1087,12 @@
   EXPECT_CALL(test_sink,
               Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
                          TextMessage(Eq("7.7e+07 7.7E+07 7.7e+07")),
-                         ENCODED_MESSAGE(EqualsProto(
-                             R"pb(value { str: "7.7e+07" }
-                                  value { literal: " " }
-                                  value { str: "7.7E+07" }
-                                  value { literal: " " }
-                                  value { str: "7.7e+07" })pb")))));
+                         ENCODED_MESSAGE(HasValues(ElementsAre(
+                             EqualsProto(R"pb(str: "7.7e+07")pb"),
+                             EqualsProto(R"pb(literal: " ")pb"),
+                             EqualsProto(R"pb(str: "7.7E+07")pb"),
+                             EqualsProto(R"pb(literal: " ")pb"),
+                             EqualsProto(R"pb(str: "7.7e+07")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << std::nouppercase << value << " "  //
@@ -1112,12 +1107,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << std::hex << value;
 
-  EXPECT_CALL(
-      test_sink, Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                            TextMessage(Eq("0x77")),
-                            ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                               str: "0x77"
-                                                             })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("0x77")),
+                         ENCODED_MESSAGE(HasValues(ElementsAre(
+                             EqualsProto(R"pb(str: "0x77")pb")))))));
   test_sink.StartCapturingLogs();
   LOG(INFO) << std::hex << value;
 }
@@ -1129,11 +1123,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << std::oct << value;
 
-  EXPECT_CALL(
-      test_sink,
-      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                 TextMessage(Eq("077")),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "077" })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("077")),
+                         ENCODED_MESSAGE(HasValues(
+                             ElementsAre(EqualsProto(R"pb(str: "077")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << std::oct << value;
@@ -1146,11 +1140,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << std::hex << std::dec << value;
 
-  EXPECT_CALL(
-      test_sink,
-      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                 TextMessage(Eq("77")),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "77" })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("77")),
+                         ENCODED_MESSAGE(HasValues(
+                             ElementsAre(EqualsProto(R"pb(str: "77")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << std::hex << std::dec << value;
@@ -1166,15 +1160,15 @@
                     << std::showbase << value << " "    //
                     << std::noshowbase << value;
 
-  EXPECT_CALL(
-      test_sink,
-      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                 TextMessage(Eq("77 0x77 77")),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "77" }
-                                                  value { literal: " " }
-                                                  value { str: "0x77" }
-                                                  value { literal: " " }
-                                                  value { str: "77" })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("77 0x77 77")),
+                         ENCODED_MESSAGE(HasValues(
+                             ElementsAre(EqualsProto(R"pb(str: "77")pb"),
+                                         EqualsProto(R"pb(literal: " ")pb"),
+                                         EqualsProto(R"pb(str: "0x77")pb"),
+                                         EqualsProto(R"pb(literal: " ")pb"),
+                                         EqualsProto(R"pb(str: "77")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << std::hex                         //
@@ -1193,15 +1187,15 @@
                     << std::showbase << value << " "    //
                     << std::noshowbase << value;
 
-  EXPECT_CALL(
-      test_sink,
-      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                 TextMessage(Eq("77 077 77")),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "77" }
-                                                  value { literal: " " }
-                                                  value { str: "077" }
-                                                  value { literal: " " }
-                                                  value { str: "77" })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("77 077 77")),
+                         ENCODED_MESSAGE(HasValues(
+                             ElementsAre(EqualsProto(R"pb(str: "77")pb"),
+                                         EqualsProto(R"pb(literal: " ")pb"),
+                                         EqualsProto(R"pb(str: "077")pb"),
+                                         EqualsProto(R"pb(literal: " ")pb"),
+                                         EqualsProto(R"pb(str: "77")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << std::oct                         //
@@ -1224,12 +1218,12 @@
   EXPECT_CALL(test_sink,
               Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
                          TextMessage(Eq("0xbeef 0XBEEF 0xbeef")),
-                         ENCODED_MESSAGE(EqualsProto(
-                             R"pb(value { str: "0xbeef" }
-                                  value { literal: " " }
-                                  value { str: "0XBEEF" }
-                                  value { literal: " " }
-                                  value { str: "0xbeef" })pb")))));
+                         ENCODED_MESSAGE(HasValues(ElementsAre(
+                             EqualsProto(R"pb(str: "0xbeef")pb"),
+                             EqualsProto(R"pb(literal: " ")pb"),
+                             EqualsProto(R"pb(str: "0XBEEF")pb"),
+                             EqualsProto(R"pb(literal: " ")pb"),
+                             EqualsProto(R"pb(str: "0xbeef")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << std::hex                          //
@@ -1245,13 +1239,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << std::fixed << value;
 
-  EXPECT_CALL(
-      test_sink,
-      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                 TextMessage(Eq("77000000.000000")),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                    str: "77000000.000000"
-                                                  })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("77000000.000000")),
+                         ENCODED_MESSAGE(HasValues(ElementsAre(
+                             EqualsProto(R"pb(str: "77000000.000000")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << std::fixed << value;
@@ -1267,9 +1259,8 @@
   EXPECT_CALL(test_sink,
               Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
                          TextMessage(Eq("7.700000e+07")),
-                         ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                            str: "7.700000e+07"
-                                                          })pb")))));
+                         ENCODED_MESSAGE(HasValues(ElementsAre(
+                             EqualsProto(R"pb(str: "7.700000e+07")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << std::scientific << value;
@@ -1295,11 +1286,9 @@
       Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
                  TextMessage(AnyOf(Eq("0x1.25bb50p+26"), Eq("0x1.25bb5p+26"),
                                    Eq("0x1.25bb500000000p+26"))),
-                 ENCODED_MESSAGE(
-                     AnyOf(EqualsProto(R"pb(value { str: "0x1.25bb5p+26" })pb"),
-                           EqualsProto(R"pb(value {
-                                              str: "0x1.25bb500000000p+26"
-                                            })pb"))))));
+                 ENCODED_MESSAGE(HasValues(ElementsAre(AnyOf(
+                     EqualsProto(R"pb(str: "0x1.25bb5p+26")pb"),
+                     EqualsProto(R"pb(str: "0x1.25bb500000000p+26")pb"))))))));
 
   test_sink.StartCapturingLogs();
 
@@ -1328,11 +1317,9 @@
       Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
                  TextMessage(AnyOf(Eq("0x1.25bb50p+26"), Eq("0x1.25bb5p+26"),
                                    Eq("0x1.25bb500000000p+26"))),
-                 ENCODED_MESSAGE(
-                     AnyOf(EqualsProto(R"pb(value { str: "0x1.25bb5p+26" })pb"),
-                           EqualsProto(R"pb(value {
-                                              str: "0x1.25bb500000000p+26"
-                                            })pb"))))));
+                 ENCODED_MESSAGE(HasValues(ElementsAre(AnyOf(
+                     EqualsProto(R"pb(str: "0x1.25bb5p+26")pb"),
+                     EqualsProto(R"pb(str: "0x1.25bb500000000p+26")pb"))))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << std::hexfloat << value;
@@ -1349,9 +1336,8 @@
   EXPECT_CALL(test_sink,
               Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
                          TextMessage(Eq("7.7e+07")),
-                         ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                            str: "7.7e+07"
-                                                          })pb")))));
+                         ENCODED_MESSAGE(HasValues(ElementsAre(
+                             EqualsProto(R"pb(str: "7.7e+07")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << std::hexfloat << std::defaultfloat << value;
@@ -1363,11 +1349,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << std::ends;
 
-  EXPECT_CALL(
-      test_sink,
-      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                 TextMessage(Eq(absl::string_view("\0", 1))),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "\0" })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq(absl::string_view("\0", 1))),
+                         ENCODED_MESSAGE(HasValues(
+                             ElementsAre(EqualsProto(R"pb(str: "\0")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << std::ends;
@@ -1384,7 +1370,8 @@
       Send(AllOf(
           TextMessage(MatchesOstream(comparison_stream)),
           TextMessage(Eq("\n")),
-          ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "\n" })pb")))));
+          ENCODED_MESSAGE(HasValues(ElementsAre(EqualsProto(R"pb(str:
+          "\n")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << std::endl;
@@ -1408,10 +1395,10 @@
           // `std::setiosflags` and `std::resetiosflags` aren't manipulators.
           // We're unable to distinguish their return type(s) from arbitrary
           // user-defined types and thus don't suppress the empty str value.
-          ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "0x77" }
-                                           value { literal: " " }
-                                           value { str: "119" }
-          )pb")))));
+          ENCODED_MESSAGE(
+              HasValues(ElementsAre(EqualsProto(R"pb(str: "0x77")pb"),
+                                    EqualsProto(R"pb(literal: " ")pb"),
+                                    EqualsProto(R"pb(str: "119")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << std::resetiosflags(std::ios_base::basefield)
@@ -1435,10 +1422,10 @@
                  // `std::setbase` isn't a manipulator.  We're unable to
                  // distinguish its return type from arbitrary user-defined
                  // types and thus don't suppress the empty str value.
-                 ENCODED_MESSAGE(EqualsProto(
-                     R"pb(value { str: "0x77" }
-                          value { literal: " " }
-                          value { str: "119" })pb")))));
+                 ENCODED_MESSAGE(HasValues(
+                     ElementsAre(EqualsProto(R"pb(str: "0x77")pb"),
+                                 EqualsProto(R"pb(literal: " ")pb"),
+                                 EqualsProto(R"pb(str: "119")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << std::setbase(16) << value << " "  //
@@ -1454,13 +1441,13 @@
 
   EXPECT_CALL(
       test_sink,
-      Send(AllOf(
-          TextMessage(MatchesOstream(comparison_stream)),
-          TextMessage(Eq("6.022e+23")),
-          // `std::setprecision` isn't a manipulator.  We're unable to
-          // distinguish its return type from arbitrary user-defined
-          // types and thus don't suppress the empty str value.
-          ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "6.022e+23" })pb")))));
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(Eq("6.022e+23")),
+                 // `std::setprecision` isn't a manipulator.  We're unable to
+                 // distinguish its return type from arbitrary user-defined
+                 // types and thus don't suppress the empty str value.
+                 ENCODED_MESSAGE(HasValues(
+                     ElementsAre(EqualsProto(R"pb(str: "6.022e+23")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << std::setprecision(4) << value;
@@ -1473,12 +1460,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << std::setprecision(200) << value;
 
-  EXPECT_CALL(
-      test_sink,
-      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                 TextMessage(Eq("602214085700000015187968")),
-                 ENCODED_MESSAGE(EqualsProto(
-                     R"pb(value { str: "602214085700000015187968" })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("602214085700000015187968")),
+                         ENCODED_MESSAGE(HasValues(ElementsAre(EqualsProto(
+                             R"pb(str: "602214085700000015187968")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << std::setprecision(200) << value;
@@ -1493,13 +1479,13 @@
 
   EXPECT_CALL(
       test_sink,
-      Send(AllOf(
-          TextMessage(MatchesOstream(comparison_stream)),
-          TextMessage(Eq("      77")),
-          // `std::setw` isn't a manipulator.  We're unable to
-          // distinguish its return type from arbitrary user-defined
-          // types and thus don't suppress the empty str value.
-          ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "      77" })pb")))));
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(Eq("      77")),
+                 // `std::setw` isn't a manipulator.  We're unable to
+                 // distinguish its return type from arbitrary user-defined
+                 // types and thus don't suppress the empty str value.
+                 ENCODED_MESSAGE(HasValues(
+                     ElementsAre(EqualsProto(R"pb(str: "      77")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << std::setw(8) << value;
@@ -1515,9 +1501,8 @@
   EXPECT_CALL(test_sink,
               Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
                          TextMessage(Eq("-77     ")),
-                         ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                            str: "-77     "
-                                                          })pb")))));
+                         ENCODED_MESSAGE(HasValues(ElementsAre(
+                             EqualsProto(R"pb(str: "-77     ")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << std::left << std::setw(8) << value;
@@ -1533,9 +1518,8 @@
   EXPECT_CALL(test_sink,
               Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
                          TextMessage(Eq("     -77")),
-                         ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                            str: "     -77"
-                                                          })pb")))));
+                         ENCODED_MESSAGE(HasValues(ElementsAre(
+                             EqualsProto(R"pb(str: "     -77")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << std::right << std::setw(8) << value;
@@ -1551,9 +1535,8 @@
   EXPECT_CALL(test_sink,
               Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
                          TextMessage(Eq("-     77")),
-                         ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                            str: "-     77"
-                                                          })pb")))));
+                         ENCODED_MESSAGE(HasValues(ElementsAre(
+                             EqualsProto(R"pb(str: "-     77")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << std::internal << std::setw(8) << value;
@@ -1573,9 +1556,8 @@
                          // unable to distinguish its return
                          // type from arbitrary user-defined types and
                          // thus don't suppress the empty str value.
-                         ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                            str: "00000077"
-                                                          })pb")))));
+                         ENCODED_MESSAGE(HasValues(ElementsAre(
+                             EqualsProto(R"pb(str: "00000077")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << std::setfill('0') << std::setw(8) << value;
@@ -1596,10 +1578,10 @@
   EXPECT_CALL(test_sink,
               Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
                          TextMessage(Eq("FromCustomClass{} 0x77")),
-                         ENCODED_MESSAGE(EqualsProto(
-                             R"pb(value { str: "FromCustomClass{}" }
-                                  value { literal: " " }
-                                  value { str: "0x77" })pb")))));
+                         ENCODED_MESSAGE(HasValues(ElementsAre(
+                             EqualsProto(R"pb(str: "FromCustomClass{}")pb"),
+                             EqualsProto(R"pb(literal: " ")pb"),
+                             EqualsProto(R"pb(str: "0x77")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value << " " << 0x77;
@@ -1615,11 +1597,11 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << value << 77;
 
-  EXPECT_CALL(
-      test_sink,
-      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
-                 TextMessage(Eq("77")),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "77" })pb")))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(Eq("77")),
+                         ENCODED_MESSAGE(HasValues(
+                             ElementsAre(EqualsProto(R"pb(str: "77")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value << 77;
@@ -1642,9 +1624,9 @@
 
   EXPECT_CALL(
       test_sink,
-      Send(AllOf(
-          TextMessage(Eq("(10, 20)")), TextMessage(Eq(absl::StrCat(p))),
-          ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "(10, 20)" })pb")))));
+      Send(AllOf(TextMessage(Eq("(10, 20)")), TextMessage(Eq(absl::StrCat(p))),
+                 ENCODED_MESSAGE(HasValues(
+                     ElementsAre(EqualsProto(R"pb(str: "(10, 20)")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << std::hex << p;
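
A note on the pattern the hunks above converge on: instead of matching the whole encoded event against one textproto via `ENCODED_MESSAGE(EqualsProto(R"pb(value { str: ... })pb"))`, the expectations now match the event's repeated value field element by element with `ENCODED_MESSAGE(HasValues(ElementsAre(...)))`, which also lets a single value carry an `AnyOf` of acceptable encodings. A minimal sketch of the new shape (not taken verbatim from this patch), assuming an `absl::ScopedMockLog test_sink` and the same test-only matchers (`ENCODED_MESSAGE`, `HasValues`, `TextMessage`, `EqualsProto`) that are in scope throughout the tests above:

  // Hypothetical expectation: a single logged value whose decimal text is "77".
  EXPECT_CALL(test_sink,
              Send(AllOf(TextMessage(Eq("77")),
                         ENCODED_MESSAGE(HasValues(
                             ElementsAre(EqualsProto(R"pb(str: "77")pb")))))));

  test_sink.StartCapturingLogs();
  LOG(INFO) << 77;
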
diff --git a/absl/log/log_modifier_methods_test.cc b/absl/log/log_modifier_methods_test.cc
index 42e13b1..4ccde40 100644
--- a/absl/log/log_modifier_methods_test.cc
+++ b/absl/log/log_modifier_methods_test.cc
@@ -156,9 +156,11 @@
                  Timestamp(Eq(absl::UnixEpoch())),
                  ThreadID(Eq(absl::LogEntry::tid_t{456})),
                  TextMessage(Eq("forwarded: hello world")), Verbosity(Eq(7)),
-                 ENCODED_MESSAGE(
-                     EqualsProto(R"pb(value { literal: "forwarded: " }
-                                      value { str: "hello world" })pb")))));
+                 ENCODED_MESSAGE(MatchesEvent(
+                     Eq("fake/file"), Eq(123), Eq(absl::UnixEpoch()),
+                     Eq(logging::proto::WARNING), Eq(456),
+                     ElementsAre(EqualsProto(R"pb(literal: "forwarded: ")pb"),
+                                 EqualsProto(R"pb(str: "hello world")pb")))))));
 
   test_sink.StartCapturingLogs();
   LOG(WARNING)
@@ -179,25 +181,15 @@
       Send(AllOf(TextMessage(AnyOf(Eq("hello world: Bad file number [9]"),
                                    Eq("hello world: Bad file descriptor [9]"),
                                    Eq("hello world: Bad file descriptor [8]"))),
-                 ENCODED_MESSAGE(
-                     AnyOf(EqualsProto(R"pb(value { literal: "hello world" }
-                                            value { literal: ": " }
-                                            value { str: "Bad file number" }
-                                            value { literal: " [" }
-                                            value { str: "9" }
-                                            value { literal: "]" })pb"),
-                           EqualsProto(R"pb(value { literal: "hello world" }
-                                            value { literal: ": " }
-                                            value { str: "Bad file descriptor" }
-                                            value { literal: " [" }
-                                            value { str: "9" }
-                                            value { literal: "]" })pb"),
-                           EqualsProto(R"pb(value { literal: "hello world" }
-                                            value { literal: ": " }
-                                            value { str: "Bad file descriptor" }
-                                            value { literal: " [" }
-                                            value { str: "8" }
-                                            value { literal: "]" })pb"))))));
+                 ENCODED_MESSAGE(HasValues(ElementsAre(
+                     EqualsProto(R"pb(literal: "hello world")pb"),
+                     EqualsProto(R"pb(literal: ": ")pb"),
+                     AnyOf(EqualsProto(R"pb(str: "Bad file number")pb"),
+                           EqualsProto(R"pb(str: "Bad file descriptor")pb")),
+                     EqualsProto(R"pb(literal: " [")pb"),
+                     AnyOf(EqualsProto(R"pb(str: "8")pb"),
+                           EqualsProto(R"pb(str: "9")pb")),
+                     EqualsProto(R"pb(literal: "]")pb")))))));
 
   test_sink.StartCapturingLogs();
   errno = EBADF;
diff --git a/absl/log/log_sink.h b/absl/log/log_sink.h
index 9bfa6f8..2910070 100644
--- a/absl/log/log_sink.h
+++ b/absl/log/log_sink.h
@@ -32,15 +32,16 @@
 // `absl::LogSink` is an interface which can be extended to intercept and
 // process particular messages (with `LOG.ToSinkOnly()` or
 // `LOG.ToSinkAlso()`) or all messages (if registered with
-// `absl::AddLogSink`).  Implementations must be thread-safe, and should take
-// care not to take any locks that might be held by the `LOG` caller.
+// `absl::AddLogSink`).  Implementations must not take any locks that might be
+// held by the `LOG` caller.
 class LogSink {
  public:
   virtual ~LogSink() = default;
 
   // LogSink::Send()
   //
-  // `Send` is called synchronously during the log statement.
+  // `Send` is called synchronously during the log statement.  `Send` must be
+  // thread-safe.
   //
   // It is safe to use `LOG` within an implementation of `Send`.  `ToSinkOnly`
   // and `ToSinkAlso` are safe in general but can be used to create an infinite
@@ -50,9 +51,15 @@
   // LogSink::Flush()
   //
   // Sinks that buffer messages should override this method to flush the buffer
-  // and return.
+  // and return.  `Flush` must be thread-safe.
   virtual void Flush() {}
 
+ protected:
+  LogSink() = default;
+  // Implementations may be copyable and/or movable.
+  LogSink(const LogSink&) = default;
+  LogSink& operator=(const LogSink&) = default;
+
  private:
   // https://lld.llvm.org/missingkeyfunction.html#missing-key-function
   virtual void KeyFunction() const final;  // NOLINT(readability/inheritance)
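
For context on the `LogSink` contract documented in this hunk: `Send` runs synchronously inside the `LOG` statement and must be thread-safe, `Flush` must be thread-safe, and an implementation must not acquire any lock the `LOG` caller might already hold. Below is a minimal sketch of a conforming sink; it is not part of this patch and only assumes the public `absl::LogSink`/`absl::LogEntry` API, guarding its own state with its own mutex:

  #include <mutex>
  #include <string>
  #include <vector>

  #include "absl/log/log_entry.h"
  #include "absl/log/log_sink.h"
  #include "absl/strings/string_view.h"

  class BufferingSink final : public absl::LogSink {
   public:
    void Send(const absl::LogEntry& entry) override {
      // Only this sink's own lock is taken; never a lock the LOG caller holds.
      std::lock_guard<std::mutex> lock(mu_);
      absl::string_view msg = entry.text_message_with_prefix();
      lines_.emplace_back(msg.data(), msg.size());
    }

    void Flush() override {
      std::lock_guard<std::mutex> lock(mu_);
      // A real sink would hand lines_ off to its backing store here.
      lines_.clear();
    }

   private:
    std::mutex mu_;
    std::vector<std::string> lines_;
  };

Such a sink is typically registered for all messages with `absl::AddLogSink(&sink)` (and later removed with `absl::RemoveLogSink`), or attached to a single statement via `LOG(INFO).ToSinkAlso(&sink)`, as the comments above describe.
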
diff --git a/absl/log/log_streamer_test.cc b/absl/log/log_streamer_test.cc
index 40c7d48..b9b9428 100644
--- a/absl/log/log_streamer_test.cc
+++ b/absl/log/log_streamer_test.cc
@@ -38,6 +38,7 @@
 #if GTEST_HAS_DEATH_TEST
 using ::absl::log_internal::DiedOfFatal;
 #endif
+using ::absl::log_internal::InMatchWindow;
 using ::absl::log_internal::LogSeverity;
 using ::absl::log_internal::Prefix;
 using ::absl::log_internal::SourceFilename;
@@ -45,7 +46,8 @@
 using ::absl::log_internal::Stacktrace;
 using ::absl::log_internal::TextMessage;
 using ::absl::log_internal::ThreadID;
-using ::absl::log_internal::TimestampInMatchWindow;
+using ::absl::log_internal::Timestamp;
+using ::testing::_;
 using ::testing::AnyNumber;
 using ::testing::Eq;
 using ::testing::HasSubstr;
@@ -67,15 +69,17 @@
 
   EXPECT_CALL(
       test_sink,
-      Send(AllOf(SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
-                 Prefix(IsTrue()), LogSeverity(Eq(absl::LogSeverity::kInfo)),
-                 TimestampInMatchWindow(),
-                 ThreadID(Eq(absl::base_internal::GetTID())),
-                 TextMessage(Eq("WriteToStream: foo")),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                    str: "WriteToStream: foo"
-                                                  })pb")),
-                 Stacktrace(IsEmpty()))));
+      Send(AllOf(
+          SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
+          Prefix(IsTrue()), LogSeverity(Eq(absl::LogSeverity::kInfo)),
+          Timestamp(InMatchWindow()),
+          ThreadID(Eq(absl::base_internal::GetTID())),
+          TextMessage(Eq("WriteToStream: foo")),
+          ENCODED_MESSAGE(MatchesEvent(
+              Eq("path/file.cc"), Eq(1234), InMatchWindow(),
+              Eq(logging::proto::INFO), Eq(absl::base_internal::GetTID()),
+              ElementsAre(EqualsProto(R"pb(str: "WriteToStream: foo")pb")))),
+          Stacktrace(IsEmpty()))));
 
   test_sink.StartCapturingLogs();
   WriteToStream("foo", &absl::LogInfoStreamer("path/file.cc", 1234).stream());
@@ -86,15 +90,17 @@
 
   EXPECT_CALL(
       test_sink,
-      Send(AllOf(SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
-                 Prefix(IsTrue()), LogSeverity(Eq(absl::LogSeverity::kWarning)),
-                 TimestampInMatchWindow(),
-                 ThreadID(Eq(absl::base_internal::GetTID())),
-                 TextMessage(Eq("WriteToStream: foo")),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                    str: "WriteToStream: foo"
-                                                  })pb")),
-                 Stacktrace(IsEmpty()))));
+      Send(AllOf(
+          SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
+          Prefix(IsTrue()), LogSeverity(Eq(absl::LogSeverity::kWarning)),
+          Timestamp(InMatchWindow()),
+          ThreadID(Eq(absl::base_internal::GetTID())),
+          TextMessage(Eq("WriteToStream: foo")),
+          ENCODED_MESSAGE(MatchesEvent(
+              Eq("path/file.cc"), Eq(1234), InMatchWindow(),
+              Eq(logging::proto::WARNING), Eq(absl::base_internal::GetTID()),
+              ElementsAre(EqualsProto(R"pb(str: "WriteToStream: foo")pb")))),
+          Stacktrace(IsEmpty()))));
 
   test_sink.StartCapturingLogs();
   WriteToStream("foo",
@@ -106,15 +112,17 @@
 
   EXPECT_CALL(
       test_sink,
-      Send(AllOf(SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
-                 Prefix(IsTrue()), LogSeverity(Eq(absl::LogSeverity::kError)),
-                 TimestampInMatchWindow(),
-                 ThreadID(Eq(absl::base_internal::GetTID())),
-                 TextMessage(Eq("WriteToStream: foo")),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                    str: "WriteToStream: foo"
-                                                  })pb")),
-                 Stacktrace(IsEmpty()))));
+      Send(AllOf(
+          SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
+          Prefix(IsTrue()), LogSeverity(Eq(absl::LogSeverity::kError)),
+          Timestamp(InMatchWindow()),
+          ThreadID(Eq(absl::base_internal::GetTID())),
+          TextMessage(Eq("WriteToStream: foo")),
+          ENCODED_MESSAGE(MatchesEvent(
+              Eq("path/file.cc"), Eq(1234), InMatchWindow(),
+              Eq(logging::proto::ERROR), Eq(absl::base_internal::GetTID()),
+              ElementsAre(EqualsProto(R"pb(str: "WriteToStream: foo")pb")))),
+          Stacktrace(IsEmpty()))));
 
   test_sink.StartCapturingLogs();
   WriteToStream("foo", &absl::LogErrorStreamer("path/file.cc", 1234).stream());
@@ -130,17 +138,19 @@
             .Times(AnyNumber())
             .WillRepeatedly(DeathTestUnexpectedLogging());
 
-        EXPECT_CALL(
-            test_sink,
-            Send(AllOf(
-                SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
-                Prefix(IsTrue()), LogSeverity(Eq(absl::LogSeverity::kFatal)),
-                TimestampInMatchWindow(),
-                ThreadID(Eq(absl::base_internal::GetTID())),
-                TextMessage(Eq("WriteToStream: foo")),
-                ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                   str: "WriteToStream: foo"
-                                                 })pb")))))
+        EXPECT_CALL(test_sink,
+                    Send(AllOf(SourceFilename(Eq("path/file.cc")),
+                               SourceLine(Eq(1234)), Prefix(IsTrue()),
+                               LogSeverity(Eq(absl::LogSeverity::kFatal)),
+                               Timestamp(InMatchWindow()),
+                               ThreadID(Eq(absl::base_internal::GetTID())),
+                               TextMessage(Eq("WriteToStream: foo")),
+                               ENCODED_MESSAGE(MatchesEvent(
+                                   Eq("path/file.cc"), Eq(1234),
+                                   InMatchWindow(), Eq(logging::proto::FATAL),
+                                   Eq(absl::base_internal::GetTID()),
+                                   ElementsAre(EqualsProto(
+                                       R"pb(str: "WriteToStream: foo")pb")))))))
             .WillOnce(DeathTestExpectedLogging());
 
         test_sink.StartCapturingLogs();
@@ -157,15 +167,17 @@
 
   EXPECT_CALL(
       test_sink,
-      Send(AllOf(SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
-                 Prefix(IsTrue()), LogSeverity(Eq(absl::LogSeverity::kError)),
-                 TimestampInMatchWindow(),
-                 ThreadID(Eq(absl::base_internal::GetTID())),
-                 TextMessage(Eq("WriteToStream: foo")),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                    str: "WriteToStream: foo"
-                                                  })pb")),
-                 Stacktrace(IsEmpty()))));
+      Send(AllOf(
+          SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
+          Prefix(IsTrue()), LogSeverity(Eq(absl::LogSeverity::kError)),
+          Timestamp(InMatchWindow()),
+          ThreadID(Eq(absl::base_internal::GetTID())),
+          TextMessage(Eq("WriteToStream: foo")),
+          ENCODED_MESSAGE(MatchesEvent(
+              Eq("path/file.cc"), Eq(1234), InMatchWindow(),
+              Eq(logging::proto::ERROR), Eq(absl::base_internal::GetTID()),
+              ElementsAre(EqualsProto(R"pb(str: "WriteToStream: foo")pb")))),
+          Stacktrace(IsEmpty()))));
 
   test_sink.StartCapturingLogs();
   WriteToStream("foo",
@@ -181,17 +193,19 @@
             .Times(AnyNumber())
             .WillRepeatedly(DeathTestUnexpectedLogging());
 
-        EXPECT_CALL(
-            test_sink,
-            Send(AllOf(
-                SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
-                Prefix(IsTrue()), LogSeverity(Eq(absl::LogSeverity::kFatal)),
-                TimestampInMatchWindow(),
-                ThreadID(Eq(absl::base_internal::GetTID())),
-                TextMessage(Eq("WriteToStream: foo")),
-                ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                   str: "WriteToStream: foo"
-                                                 })pb")))))
+        EXPECT_CALL(test_sink,
+                    Send(AllOf(SourceFilename(Eq("path/file.cc")),
+                               SourceLine(Eq(1234)), Prefix(IsTrue()),
+                               LogSeverity(Eq(absl::LogSeverity::kFatal)),
+                               Timestamp(InMatchWindow()),
+                               ThreadID(Eq(absl::base_internal::GetTID())),
+                               TextMessage(Eq("WriteToStream: foo")),
+                               ENCODED_MESSAGE(MatchesEvent(
+                                   Eq("path/file.cc"), Eq(1234),
+                                   InMatchWindow(), Eq(logging::proto::FATAL),
+                                   Eq(absl::base_internal::GetTID()),
+                                   ElementsAre(EqualsProto(
+                                       R"pb(str: "WriteToStream: foo")pb")))))))
             .WillOnce(DeathTestExpectedLogging());
 
         test_sink.StartCapturingLogs();
@@ -207,15 +221,17 @@
 
   EXPECT_CALL(
       test_sink,
-      Send(AllOf(SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
-                 Prefix(IsTrue()), LogSeverity(Eq(absl::LogSeverity::kError)),
-                 TimestampInMatchWindow(),
-                 ThreadID(Eq(absl::base_internal::GetTID())),
-                 TextMessage(Eq("WriteToStream: foo")),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                    str: "WriteToStream: foo"
-                                                  })pb")),
-                 Stacktrace(IsEmpty()))));
+      Send(AllOf(
+          SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
+          Prefix(IsTrue()), LogSeverity(Eq(absl::LogSeverity::kError)),
+          Timestamp(InMatchWindow()),
+          ThreadID(Eq(absl::base_internal::GetTID())),
+          TextMessage(Eq("WriteToStream: foo")),
+          ENCODED_MESSAGE(MatchesEvent(
+              Eq("path/file.cc"), Eq(1234), InMatchWindow(),
+              Eq(logging::proto::ERROR), Eq(absl::base_internal::GetTID()),
+              ElementsAre(EqualsProto(R"pb(str: "WriteToStream: foo")pb")))),
+          Stacktrace(IsEmpty()))));
 
   test_sink.StartCapturingLogs();
   WriteToStream(
@@ -233,17 +249,19 @@
             .Times(AnyNumber())
             .WillRepeatedly(DeathTestUnexpectedLogging());
 
-        EXPECT_CALL(
-            test_sink,
-            Send(AllOf(
-                SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
-                Prefix(IsTrue()), LogSeverity(Eq(absl::LogSeverity::kFatal)),
-                TimestampInMatchWindow(),
-                ThreadID(Eq(absl::base_internal::GetTID())),
-                TextMessage(Eq("WriteToStream: foo")),
-                ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                   str: "WriteToStream: foo"
-                                                 })pb")))))
+        EXPECT_CALL(test_sink,
+                    Send(AllOf(SourceFilename(Eq("path/file.cc")),
+                               SourceLine(Eq(1234)), Prefix(IsTrue()),
+                               LogSeverity(Eq(absl::LogSeverity::kFatal)),
+                               Timestamp(InMatchWindow()),
+                               ThreadID(Eq(absl::base_internal::GetTID())),
+                               TextMessage(Eq("WriteToStream: foo")),
+                               ENCODED_MESSAGE(MatchesEvent(
+                                   Eq("path/file.cc"), Eq(1234),
+                                   InMatchWindow(), Eq(logging::proto::FATAL),
+                                   Eq(absl::base_internal::GetTID()),
+                                   ElementsAre(EqualsProto(
+                                       R"pb(str: "WriteToStream: foo")pb")))))))
             .WillOnce(DeathTestExpectedLogging());
 
         test_sink.StartCapturingLogs();
@@ -260,12 +278,13 @@
 
   EXPECT_CALL(
       test_sink,
-      Send(AllOf(SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
-                 TextMessage(Eq("WriteToStreamRef: foo")),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                    str: "WriteToStreamRef: foo"
-                                                  })pb")),
-                 Stacktrace(IsEmpty()))));
+      Send(AllOf(
+          SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
+          TextMessage(Eq("WriteToStreamRef: foo")),
+          ENCODED_MESSAGE(MatchesEvent(
+              Eq("path/file.cc"), Eq(1234), _, _, _,
+              ElementsAre(EqualsProto(R"pb(str: "WriteToStreamRef: foo")pb")))),
+          Stacktrace(IsEmpty()))));
 
   test_sink.StartCapturingLogs();
   WriteToStreamRef("foo", absl::LogInfoStreamer("path/file.cc", 1234).stream());
@@ -284,13 +303,14 @@
   // test would fail.
   EXPECT_CALL(
       test_sink,
-      Send(AllOf(SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
-                 TextMessage(Eq("WriteToStream: foo WriteToStreamRef: bar")),
-                 ENCODED_MESSAGE(EqualsProto(
-                     R"pb(value {
-                            str: "WriteToStream: foo WriteToStreamRef: bar"
-                          })pb")),
-                 Stacktrace(IsEmpty()))));
+      Send(AllOf(
+          SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
+          TextMessage(Eq("WriteToStream: foo WriteToStreamRef: bar")),
+          ENCODED_MESSAGE(MatchesEvent(
+              Eq("path/file.cc"), Eq(1234), _, _, _,
+              ElementsAre(EqualsProto(
+                  R"pb(str: "WriteToStream: foo WriteToStreamRef: bar")pb")))),
+          Stacktrace(IsEmpty()))));
 
   test_sink.StartCapturingLogs();
 }
@@ -311,12 +331,13 @@
 TEST(LogStreamerTest, LogsEmptyLine) {
   absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
 
-  EXPECT_CALL(test_sink, Send(AllOf(SourceFilename(Eq("path/file.cc")),
-                                    SourceLine(Eq(1234)), TextMessage(Eq("")),
-                                    ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                                       str: ""
-                                                                     })pb")),
-                                    Stacktrace(IsEmpty()))));
+  EXPECT_CALL(test_sink,
+              Send(AllOf(SourceFilename(Eq("path/file.cc")),
+                         SourceLine(Eq(1234)), TextMessage(Eq("")),
+                         ENCODED_MESSAGE(MatchesEvent(
+                             Eq("path/file.cc"), Eq(1234), _, _, _,
+                             ElementsAre(EqualsProto(R"pb(str: "")pb")))),
+                         Stacktrace(IsEmpty()))));
 
   test_sink.StartCapturingLogs();
   absl::LogInfoStreamer("path/file.cc", 1234);
@@ -334,9 +355,10 @@
 
         EXPECT_CALL(
             test_sink,
-            Send(AllOf(
-                SourceFilename(Eq("path/file.cc")), TextMessage(Eq("")),
-                ENCODED_MESSAGE(EqualsProto(R"pb(value { str: "" })pb")))))
+            Send(AllOf(SourceFilename(Eq("path/file.cc")), TextMessage(Eq("")),
+                       ENCODED_MESSAGE(MatchesEvent(
+                           Eq("path/file.cc"), _, _, _, _,
+                           ElementsAre(EqualsProto(R"pb(str: "")pb")))))))
             .WillOnce(DeathTestExpectedLogging());
 
         test_sink.StartCapturingLogs();
@@ -352,13 +374,14 @@
 
   EXPECT_CALL(
       test_sink,
-      Send(AllOf(SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
-                 LogSeverity(Eq(absl::LogSeverity::kInfo)),
-                 TextMessage(Eq("hello 0x10 world 0x10")),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                    str: "hello 0x10 world 0x10"
-                                                  })pb")),
-                 Stacktrace(IsEmpty()))));
+      Send(AllOf(
+          SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
+          LogSeverity(Eq(absl::LogSeverity::kInfo)),
+          TextMessage(Eq("hello 0x10 world 0x10")),
+          ENCODED_MESSAGE(MatchesEvent(
+              Eq("path/file.cc"), Eq(1234), _, Eq(logging::proto::INFO), _,
+              ElementsAre(EqualsProto(R"pb(str: "hello 0x10 world 0x10")pb")))),
+          Stacktrace(IsEmpty()))));
 
   test_sink.StartCapturingLogs();
   auto streamer1 = absl::LogInfoStreamer("path/file.cc", 1234);
@@ -373,22 +396,24 @@
   testing::InSequence seq;
   EXPECT_CALL(
       test_sink,
-      Send(AllOf(SourceFilename(Eq("path/file2.cc")), SourceLine(Eq(5678)),
-                 LogSeverity(Eq(absl::LogSeverity::kWarning)),
-                 TextMessage(Eq("something else")),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                    str: "something else"
-                                                  })pb")),
-                 Stacktrace(IsEmpty()))));
+      Send(AllOf(
+          SourceFilename(Eq("path/file2.cc")), SourceLine(Eq(5678)),
+          LogSeverity(Eq(absl::LogSeverity::kWarning)),
+          TextMessage(Eq("something else")),
+          ENCODED_MESSAGE(MatchesEvent(
+              Eq("path/file2.cc"), Eq(5678), _, Eq(logging::proto::WARNING), _,
+              ElementsAre(EqualsProto(R"pb(str: "something else")pb")))),
+          Stacktrace(IsEmpty()))));
   EXPECT_CALL(
       test_sink,
-      Send(AllOf(SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
-                 LogSeverity(Eq(absl::LogSeverity::kInfo)),
-                 TextMessage(Eq("hello 0x10 world 0x10")),
-                 ENCODED_MESSAGE(EqualsProto(R"pb(value {
-                                                    str: "hello 0x10 world 0x10"
-                                                  })pb")),
-                 Stacktrace(IsEmpty()))));
+      Send(AllOf(
+          SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
+          LogSeverity(Eq(absl::LogSeverity::kInfo)),
+          TextMessage(Eq("hello 0x10 world 0x10")),
+          ENCODED_MESSAGE(MatchesEvent(
+              Eq("path/file.cc"), Eq(1234), _, Eq(logging::proto::INFO), _,
+              ElementsAre(EqualsProto(R"pb(str: "hello 0x10 world 0x10")pb")))),
+          Stacktrace(IsEmpty()))));
 
   test_sink.StartCapturingLogs();
   auto streamer1 = absl::LogInfoStreamer("path/file.cc", 1234);
diff --git a/absl/log/structured_test.cc b/absl/log/structured_test.cc
index 490a35d..6f1df18 100644
--- a/absl/log/structured_test.cc
+++ b/absl/log/structured_test.cc
@@ -30,6 +30,7 @@
 namespace {
 using ::absl::log_internal::MatchesOstream;
 using ::absl::log_internal::TextMessage;
+using ::testing::ElementsAre;
 using ::testing::Eq;
 
 auto *test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment(
@@ -53,8 +54,8 @@
   EXPECT_CALL(sink,
               Send(AllOf(TextMessage(MatchesOstream(stream)),
                          TextMessage(Eq("hello world")),
-                         ENCODED_MESSAGE(EqualsProto(
-                             R"pb(value { literal: "hello world" })pb")))));
+                         ENCODED_MESSAGE(HasValues(ElementsAre(
+                             EqualsProto(R"pb(literal: "hello world")pb")))))));
 
   sink.StartCapturingLogs();
   LOG(INFO) << absl::LogAsLiteral(not_a_literal);
diff --git a/absl/log/vlog_is_on.h b/absl/log/vlog_is_on.h
index 7898651..f7539df 100644
--- a/absl/log/vlog_is_on.h
+++ b/absl/log/vlog_is_on.h
@@ -46,12 +46,12 @@
 // Files which do not match any pattern in `--vmodule` use the value of `--v` as
 // their effective verbosity level.  The default is 0.
 //
-// SetVLOGLevel helper function is provided to do limited dynamic control over
+// SetVLogLevel helper function is provided to do limited dynamic control over
 // V-logging by appending to `--vmodule`. Because these go at the beginning of
 // the list, they take priority over any globs previously added.
 //
 // Resetting --vmodule will override all previous modifications to `--vmodule`,
-// including via SetVLOGLevel.
+// including via SetVLogLevel.
 
 #ifndef ABSL_LOG_VLOG_IS_ON_H_
 #define ABSL_LOG_VLOG_IS_ON_H_
@@ -63,7 +63,7 @@
 // Each VLOG_IS_ON call site gets its own VLogSite that registers with the
 // global linked list of sites to asynchronously update its verbosity level on
 // changes to --v or --vmodule. The verbosity can also be set by manually
-// calling SetVLOGLevel.
+// calling SetVLogLevel.
 //
 // VLOG_IS_ON is not async signal safe, but it is guaranteed not to allocate
 // new memory.
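
Editor's note: a hedged usage sketch of the renamed SetVLogLevel helper described above. It is not part of this patch; header locations and the exact pattern-matching rules are assumed from the current Abseil layout.

#include "absl/log/globals.h"     // absl::SetVLogLevel (assumed location)
#include "absl/log/log.h"         // LOG
#include "absl/log/vlog_is_on.h"  // VLOG_IS_ON

void EnableVerboseParser() {
  // Matches files named parser.cc / parser.h; entries added this way are
  // prepended to --vmodule, so they take priority over globs added earlier.
  absl::SetVLogLevel("parser", 2);
}

void Parse() {
  if (VLOG_IS_ON(2)) {
    LOG(INFO) << "verbose parser state";  // only paid for at verbosity >= 2
  }
}
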
diff --git a/absl/meta/type_traits.h b/absl/meta/type_traits.h
index cf71164..ded5582 100644
--- a/absl/meta/type_traits.h
+++ b/absl/meta/type_traits.h
@@ -37,11 +37,21 @@
 
 #include <cstddef>
 #include <functional>
+#include <string>
 #include <type_traits>
+#include <vector>
 
 #include "absl/base/attributes.h"
 #include "absl/base/config.h"
 
+#ifdef __cpp_lib_span
+#include <span>  // NOLINT(build/c++20)
+#endif
+
+#ifdef ABSL_HAVE_STD_STRING_VIEW
+#include <string_view>
+#endif
+
 // Defines the default alignment. `__STDCPP_DEFAULT_NEW_ALIGNMENT__` is a C++17
 // feature.
 #if defined(__STDCPP_DEFAULT_NEW_ALIGNMENT__)
@@ -152,8 +162,8 @@
 struct disjunction : std::false_type {};
 
 template <typename T, typename... Ts>
-struct disjunction<T, Ts...> :
-      std::conditional<T::value, T, disjunction<Ts...>>::type {};
+struct disjunction<T, Ts...>
+    : std::conditional<T::value, T, disjunction<Ts...>>::type {};
 
 template <typename T>
 struct disjunction<T> : T {};
@@ -279,27 +289,6 @@
 template <typename T>
 using remove_all_extents_t = typename std::remove_all_extents<T>::type;
 
-ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
-namespace type_traits_internal {
-// This trick to retrieve a default alignment is necessary for our
-// implementation of aligned_storage_t to be consistent with any
-// implementation of std::aligned_storage.
-template <size_t Len, typename T = std::aligned_storage<Len>>
-struct default_alignment_of_aligned_storage;
-
-template <size_t Len, size_t Align>
-struct default_alignment_of_aligned_storage<
-    Len, std::aligned_storage<Len, Align>> {
-  static constexpr size_t value = Align;
-};
-}  // namespace type_traits_internal
-
-// TODO(b/260219225): std::aligned_storage(_t) is deprecated in C++23.
-template <size_t Len, size_t Align = type_traits_internal::
-                          default_alignment_of_aligned_storage<Len>::value>
-using aligned_storage_t = typename std::aligned_storage<Len, Align>::type;
-ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
-
 template <typename T>
 using decay_t = typename std::decay<T>::type;
 
@@ -315,22 +304,23 @@
 template <typename T>
 using underlying_type_t = typename std::underlying_type<T>::type;
 
-
 namespace type_traits_internal {
 
 #if (defined(__cpp_lib_is_invocable) && __cpp_lib_is_invocable >= 201703L) || \
     (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L)
 // std::result_of is deprecated (C++17) or removed (C++20)
-template<typename> struct result_of;
-template<typename F, typename... Args>
+template <typename>
+struct result_of;
+template <typename F, typename... Args>
 struct result_of<F(Args...)> : std::invoke_result<F, Args...> {};
 #else
-template<typename F> using result_of = std::result_of<F>;
+template <typename F>
+using result_of = std::result_of<F>;
 #endif
 
 }  // namespace type_traits_internal
 
-template<typename F>
+template <typename F>
 using result_of_t = typename type_traits_internal::result_of<F>::type;
 
 namespace type_traits_internal {
@@ -463,20 +453,23 @@
 // Make the swap-related traits/function accessible from this namespace.
 using swap_internal::IsNothrowSwappable;
 using swap_internal::IsSwappable;
-using swap_internal::Swap;
 using swap_internal::StdSwapIsUnconstrained;
+using swap_internal::Swap;
 
 }  // namespace type_traits_internal
 
 // absl::is_trivially_relocatable<T>
 //
 // Detects whether a type is known to be "trivially relocatable" -- meaning it
-// can be relocated without invoking the constructor/destructor, using a form of
-// move elision.
+// can be relocated from one place to another as if by memcpy/memmove.
+// This implies that its object representation doesn't depend on its address,
+// and also none of its special member functions do anything strange.
 //
-// This trait is conservative, for backwards compatibility. If it's true then
-// the type is definitely trivially relocatable, but if it's false then the type
-// may or may not be.
+// This trait is conservative. If it's true then the type is definitely
+// trivially relocatable, but if it's false then the type may or may not be. For
+// example, std::vector<int> is trivially relocatable on every known STL
+// implementation, but absl::is_trivially_relocatable<std::vector<int>> remains
+// false.
 //
 // Example:
 //
@@ -501,22 +494,34 @@
 //
 // TODO(b/275003464): remove the opt-out once the bug is fixed.
 //
+// Starting with Xcode 15, the Apple compiler will falsely say a type
+// with a user-provided move constructor is trivially relocatable
+// (b/324278148). We will opt out without a version check, due to
+// the fluidity of Apple versions.
+//
+// TODO(b/324278148): If all versions we use have the bug fixed, then
+// remove the condition.
+//
+// Clang on all platforms fails to detect that a type with a user-provided
+// move-assignment operator is not trivially relocatable. So in fact we
+// opt out of Clang altogether, for now.
+//
+// TODO(b/325479096): Remove the opt-out once Clang's behavior is fixed.
+//
 // According to https://github.com/abseil/abseil-cpp/issues/1479, this does not
 // work with NVCC either.
-#if ABSL_HAVE_BUILTIN(__is_trivially_relocatable) &&                 \
-    !(defined(__clang__) && (defined(_WIN32) || defined(_WIN64))) && \
-    !defined(__NVCC__)
+#if ABSL_HAVE_BUILTIN(__is_trivially_relocatable) && \
+    (defined(__cpp_impl_trivially_relocatable) ||    \
+     (!defined(__clang__) && !defined(__APPLE__) && !defined(__NVCC__)))
 template <class T>
 struct is_trivially_relocatable
     : std::integral_constant<bool, __is_trivially_relocatable(T)> {};
 #else
 // Otherwise we use a fallback that detects only those types we can feasibly
-// detect. Any time that has trivial move-construction and destruction
-// operations is by definition trivially relocatable.
+// detect. Any type that is trivially copyable is by definition trivially
+// relocatable.
 template <class T>
-struct is_trivially_relocatable
-    : absl::conjunction<absl::is_trivially_move_constructible<T>,
-                        absl::is_trivially_destructible<T>> {};
+struct is_trivially_relocatable : std::is_trivially_copyable<T> {};
 #endif
 
 // absl::is_constant_evaluated()
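
Editor's note: a hedged illustration of what the revised trait reports under the new std::is_trivially_copyable fallback. The struct names are invented for this sketch; the second assertion only shows the trait's conservatism, not that the type is truly non-relocatable.

#include "absl/meta/type_traits.h"

struct Flat {  // trivially copyable: no user-provided special members
  int x;
  int y;
};

struct Pinned {  // user-provided move constructor and destructor
  Pinned(Pinned&&) {}
  ~Pinned() {}
};

static_assert(absl::is_trivially_relocatable<Flat>::value,
              "trivially copyable types are reported as trivially relocatable");
static_assert(!absl::is_trivially_relocatable<Pinned>::value,
              "user-provided special members yield false, even though false "
              "does not prove the type cannot be relocated");
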
@@ -558,6 +563,97 @@
 #endif
 }
 #endif  // ABSL_HAVE_CONSTANT_EVALUATED
+
+namespace type_traits_internal {
+
+// Detects if a class's definition has declared itself to be an owner by
+// declaring
+//   using absl_internal_is_view = std::false_type;
+// as a member.
+// Types that want to be neither an owner nor a view must either omit this
+// declaration entirely, or (if e.g. inheriting from a base class) define the
+// member to something that isn't a Boolean trait class, such as `void`.
+// Do not specialize or use this directly. It's an implementation detail.
+template <typename T, typename = void>
+struct IsOwnerImpl : std::false_type {
+  static_assert(std::is_same<T, absl::remove_cvref_t<T>>::value,
+                "type must lack qualifiers");
+};
+
+template <typename T>
+struct IsOwnerImpl<
+    T,
+    std::enable_if_t<std::is_class<typename T::absl_internal_is_view>::value>>
+    : absl::negation<typename T::absl_internal_is_view> {};
+
+// A trait to determine whether a type is an owner.
+// Do *not* depend on the correctness of this trait for correct code behavior.
+// It is only a safety feature and its value may change in the future.
+// Do not specialize this; instead, define the member trait inside your type so
+// that it can be auto-detected, and to prevent ODR violations.
+// If it ever becomes possible to detect [[gsl::Owner]], we should leverage it:
+// https://wg21.link/p1179
+template <typename T>
+struct IsOwner : IsOwnerImpl<T> {};
+
+template <typename T, typename Traits, typename Alloc>
+struct IsOwner<std::basic_string<T, Traits, Alloc>> : std::true_type {};
+
+template <typename T, typename Alloc>
+struct IsOwner<std::vector<T, Alloc>> : std::true_type {};
+
+// Detects if a class's definition has declared itself to be a view by declaring
+//   using absl_internal_is_view = std::true_type;
+// as a member.
+// Do not specialize or use this directly.
+template <typename T, typename = void>
+struct IsViewImpl : std::false_type {
+  static_assert(std::is_same<T, absl::remove_cvref_t<T>>::value,
+                "type must lack qualifiers");
+};
+
+template <typename T>
+struct IsViewImpl<
+    T,
+    std::enable_if_t<std::is_class<typename T::absl_internal_is_view>::value>>
+    : T::absl_internal_is_view {};
+
+// A trait to determine whether a type is a view.
+// Do *not* depend on the correctness of this trait for correct code behavior.
+// It is only a safety feature, and its value may change in the future.
+// Do not specialize this trait. Instead, define the member
+//   using absl_internal_is_view = std::true_type;
+// in your class to allow its detection while preventing ODR violations.
+// If it ever becomes possible to detect [[gsl::Pointer]], we should leverage
+// it: https://wg21.link/p1179
+template <typename T>
+struct IsView : std::integral_constant<bool, std::is_pointer<T>::value ||
+                                                 IsViewImpl<T>::value> {};
+
+#ifdef ABSL_HAVE_STD_STRING_VIEW
+template <typename Char, typename Traits>
+struct IsView<std::basic_string_view<Char, Traits>> : std::true_type {};
+#endif
+
+#ifdef __cpp_lib_span
+template <typename T>
+struct IsView<std::span<T>> : std::true_type {};
+#endif
+
+// Determines whether the assignment of the given types is lifetime-bound.
+// Do *not* depend on the correctness of this trait for correct code behavior.
+// It is only a safety feature and its value may change in the future.
+// If it ever becomes possible to detect [[clang::lifetimebound]] directly,
+// we should change the implementation to leverage that.
+// Until then, we consider an assignment from an "owner" (such as std::string)
+// to a "view" (such as std::string_view) to be a lifetime-bound assignment.
+template <typename T, typename U>
+using IsLifetimeBoundAssignment =
+    std::integral_constant<bool, IsView<absl::remove_cvref_t<T>>::value &&
+                                     IsOwner<absl::remove_cvref_t<U>>::value>;
+
+}  // namespace type_traits_internal
+
 ABSL_NAMESPACE_END
 }  // namespace absl
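
Editor's note: a hedged sketch of how the internal IsOwner/IsView/IsLifetimeBoundAssignment traits added above classify types. These live in an internal namespace and are not a public API; MySpanLike is a made-up type showing the opt-in member typedef.

#include <cstddef>
#include <string>
#include <type_traits>
#include <vector>

#include "absl/meta/type_traits.h"

#ifdef ABSL_HAVE_STD_STRING_VIEW
#include <string_view>
#endif

namespace tti = absl::type_traits_internal;

// Hypothetical view type opting in through the member typedef described above.
struct MySpanLike {
  using absl_internal_is_view = std::true_type;
  const int* data;
  std::size_t size;
};

static_assert(tti::IsOwner<std::string>::value, "owners include std::string");
static_assert(tti::IsOwner<std::vector<int>>::value, "and std::vector");
static_assert(tti::IsView<MySpanLike>::value, "views can opt in via the typedef");
#ifdef ABSL_HAVE_STD_STRING_VIEW
static_assert(
    tti::IsLifetimeBoundAssignment<std::string_view, std::string>::value,
    "assigning an owner to a view is treated as lifetime-bound");
#endif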
 
diff --git a/absl/meta/type_traits_test.cc b/absl/meta/type_traits_test.cc
index 7412f33..1e056bb 100644
--- a/absl/meta/type_traits_test.cc
+++ b/absl/meta/type_traits_test.cc
@@ -26,10 +26,32 @@
 #include "absl/time/clock.h"
 #include "absl/time/time.h"
 
+#ifdef ABSL_HAVE_STD_STRING_VIEW
+#include <string_view>
+#endif
+
 namespace {
 
 using ::testing::StaticAssertTypeEq;
 
+template <typename T>
+using IsOwnerAndNotView =
+    absl::conjunction<absl::type_traits_internal::IsOwner<T>,
+                      absl::negation<absl::type_traits_internal::IsView<T>>>;
+
+static_assert(IsOwnerAndNotView<std::vector<int>>::value,
+              "vector is an owner, not a view");
+static_assert(IsOwnerAndNotView<std::string>::value,
+              "string is an owner, not a view");
+static_assert(IsOwnerAndNotView<std::wstring>::value,
+              "wstring is an owner, not a view");
+#ifdef ABSL_HAVE_STD_STRING_VIEW
+static_assert(!IsOwnerAndNotView<std::string_view>::value,
+              "string_view is a view, not an owner");
+static_assert(!IsOwnerAndNotView<std::wstring_view>::value,
+              "wstring_view is a view, not an owner");
+#endif
+
 template <class T, class U>
 struct simple_pair {
   T first;
@@ -362,8 +384,8 @@
   EXPECT_TRUE(absl::is_function<void() noexcept>::value);
   EXPECT_TRUE(absl::is_function<void(...) noexcept>::value);
 
-  EXPECT_FALSE(absl::is_function<void(*)()>::value);
-  EXPECT_FALSE(absl::is_function<void(&)()>::value);
+  EXPECT_FALSE(absl::is_function<void (*)()>::value);
+  EXPECT_FALSE(absl::is_function<void (&)()>::value);
   EXPECT_FALSE(absl::is_function<int>::value);
   EXPECT_FALSE(absl::is_function<Callable>::value);
 }
@@ -382,8 +404,8 @@
   // Does not remove const in this case.
   EXPECT_TRUE((std::is_same<typename absl::remove_cvref<const int*>::type,
                             const int*>::value));
-  EXPECT_TRUE((std::is_same<typename absl::remove_cvref<int[2]>::type,
-                            int[2]>::value));
+  EXPECT_TRUE(
+      (std::is_same<typename absl::remove_cvref<int[2]>::type, int[2]>::value));
   EXPECT_TRUE((std::is_same<typename absl::remove_cvref<int(&)[2]>::type,
                             int[2]>::value));
   EXPECT_TRUE((std::is_same<typename absl::remove_cvref<int(&&)[2]>::type,
@@ -489,76 +511,6 @@
   ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(remove_all_extents, int[][1]);
 }
 
-TEST(TypeTraitsTest, TestAlignedStorageAlias) {
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 1);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 2);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 3);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 4);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 5);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 6);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 7);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 8);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 9);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 10);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 11);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 12);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 13);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 14);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 15);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 16);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 17);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 18);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 19);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 20);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 21);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 22);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 23);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 24);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 25);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 26);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 27);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 28);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 29);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 30);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 31);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 32);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 33);
-
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 1, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 2, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 3, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 4, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 5, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 6, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 7, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 8, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 9, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 10, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 11, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 12, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 13, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 14, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 15, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 16, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 17, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 18, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 19, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 20, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 21, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 22, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 23, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 24, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 25, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 26, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 27, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 28, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 29, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 30, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 31, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 32, 128);
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(aligned_storage, 33, 128);
-}
-
 TEST(TypeTraitsTest, TestDecay) {
   ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(decay, int);
   ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(decay, const int);
@@ -580,7 +532,7 @@
   ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(decay, int[][1]);
 
   ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(decay, int());
-  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(decay, int(float));  // NOLINT
+  ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(decay, int(float));      // NOLINT
   ABSL_INTERNAL_EXPECT_ALIAS_EQUIVALENCE(decay, int(char, ...));  // NOLINT
 }
 
@@ -664,8 +616,7 @@
 
 namespace adl_namespace {
 
-struct DeletedSwap {
-};
+struct DeletedSwap {};
 
 void swap(DeletedSwap&, DeletedSwap&) = delete;
 
@@ -751,7 +702,7 @@
 
 // User-defined types can be trivially relocatable as long as they don't have a
 // user-provided move constructor or destructor.
-TEST(TriviallyRelocatable, UserDefinedTriviallyReconstructible) {
+TEST(TriviallyRelocatable, UserDefinedTriviallyRelocatable) {
   struct S {
     int x;
     int y;
@@ -780,6 +731,30 @@
   static_assert(!absl::is_trivially_relocatable<S>::value, "");
 }
 
+// A user-provided copy assignment operator disqualifies a type from
+// being trivially relocatable.
+TEST(TriviallyRelocatable, UserProvidedCopyAssignment) {
+  struct S {
+    S(const S&) = default;
+    S& operator=(const S&) {  // NOLINT(modernize-use-equals-default)
+      return *this;
+    }
+  };
+
+  static_assert(!absl::is_trivially_relocatable<S>::value, "");
+}
+
+// A user-provided move assignment operator disqualifies a type from
+// being trivially relocatable.
+TEST(TriviallyRelocatable, UserProvidedMoveAssignment) {
+  struct S {
+    S(S&&) = default;
+    S& operator=(S&&) { return *this; }  // NOLINT(modernize-use-equals-default)
+  };
+
+  static_assert(!absl::is_trivially_relocatable<S>::value, "");
+}
+
 // A user-provided destructor disqualifies a type from being trivially
 // relocatable.
 TEST(TriviallyRelocatable, UserProvidedDestructor) {
@@ -792,17 +767,21 @@
 
 // TODO(b/275003464): remove the opt-out for Clang on Windows once
 // __is_trivially_relocatable is used there again.
+// TODO(b/324278148): remove the opt-out for Apple once
+// __is_trivially_relocatable is fixed there.
 #if defined(ABSL_HAVE_ATTRIBUTE_TRIVIAL_ABI) &&      \
     ABSL_HAVE_BUILTIN(__is_trivially_relocatable) && \
-    !(defined(__clang__) && (defined(_WIN32) || defined(_WIN64)))
+    (defined(__cpp_impl_trivially_relocatable) ||    \
+     (!defined(__clang__) && !defined(__APPLE__) && !defined(__NVCC__)))
 // A type marked with the "trivial ABI" attribute is trivially relocatable even
-// if it has user-provided move/copy constructors and a user-provided
-// destructor.
-TEST(TrivallyRelocatable, TrivialAbi) {
+// if it has user-provided special members.
+TEST(TriviallyRelocatable, TrivialAbi) {
   struct ABSL_ATTRIBUTE_TRIVIAL_ABI S {
     S(S&&) {}       // NOLINT(modernize-use-equals-default)
     S(const S&) {}  // NOLINT(modernize-use-equals-default)
-    ~S() {}         // NOLINT(modernize-use-equals-default)
+    void operator=(S&&) {}
+    void operator=(const S&) {}
+    ~S() {}  // NOLINT(modernize-use-equals-default)
   };
 
   static_assert(absl::is_trivially_relocatable<S>::value, "");
@@ -821,7 +800,7 @@
 
 #endif  // ABSL_HAVE_CONSTANT_EVALUATED
 
-TEST(TrivallyRelocatable, is_constant_evaluated) {
+TEST(IsConstantEvaluated, is_constant_evaluated) {
 #ifdef ABSL_HAVE_CONSTANT_EVALUATED
   constexpr int64_t constant = NegateIfConstantEvaluated(42);
   EXPECT_EQ(constant, -42);
@@ -837,5 +816,4 @@
 #endif  // ABSL_HAVE_CONSTANT_EVALUATED
 }
 
-
 }  // namespace
diff --git a/absl/numeric/BUILD.bazel b/absl/numeric/BUILD.bazel
index db02b9c..f202c6e 100644
--- a/absl/numeric/BUILD.bazel
+++ b/absl/numeric/BUILD.bazel
@@ -46,7 +46,7 @@
 
 cc_binary(
     name = "bits_benchmark",
-    testonly = 1,
+    testonly = True,
     srcs = ["bits_benchmark.cc"],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -89,6 +89,7 @@
         ":bits",
         "//absl/base:config",
         "//absl/base:core_headers",
+        "//absl/types:compare",
     ],
 )
 
@@ -107,6 +108,7 @@
         "//absl/hash:hash_testing",
         "//absl/meta:type_traits",
         "//absl/strings",
+        "//absl/types:compare",
         "@com_google_googletest//:gtest",
         "@com_google_googletest//:gtest_main",
     ],
diff --git a/absl/numeric/CMakeLists.txt b/absl/numeric/CMakeLists.txt
index 7181b91..da3b6ef 100644
--- a/absl/numeric/CMakeLists.txt
+++ b/absl/numeric/CMakeLists.txt
@@ -53,6 +53,7 @@
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
+    absl::compare
     absl::config
     absl::core_headers
     absl::bits
@@ -70,6 +71,7 @@
   DEPS
     absl::int128
     absl::base
+    absl::compare
     absl::hash_testing
     absl::type_traits
     absl::strings
diff --git a/absl/numeric/int128.cc b/absl/numeric/int128.cc
index daa32b5..5d6c68d 100644
--- a/absl/numeric/int128.cc
+++ b/absl/numeric/int128.cc
@@ -29,9 +29,6 @@
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 
-ABSL_DLL const uint128 kuint128max = MakeUint128(
-    std::numeric_limits<uint64_t>::max(), std::numeric_limits<uint64_t>::max());
-
 namespace {
 
 // Returns the 0-based position of the last set bit (i.e., most significant bit)
diff --git a/absl/numeric/int128.h b/absl/numeric/int128.h
index 7530a79..5a067d1 100644
--- a/absl/numeric/int128.h
+++ b/absl/numeric/int128.h
@@ -38,6 +38,7 @@
 #include "absl/base/config.h"
 #include "absl/base/macros.h"
 #include "absl/base/port.h"
+#include "absl/types/compare.h"
 
 #if defined(_MSC_VER)
 // In very old versions of MSVC and when the /Zc:wchar_t flag is off, wchar_t is
@@ -244,11 +245,6 @@
 #endif  // byte order
 };
 
-// Prefer to use the constexpr `Uint128Max()`.
-//
-// TODO(absl-team) deprecate kuint128max once migration tool is released.
-ABSL_DLL extern const uint128 kuint128max;
-
 // allow uint128 to be logged
 std::ostream& operator<<(std::ostream& os, uint128 v);
 
@@ -274,7 +270,9 @@
   static constexpr bool has_infinity = false;
   static constexpr bool has_quiet_NaN = false;
   static constexpr bool has_signaling_NaN = false;
+  ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
   static constexpr float_denorm_style has_denorm = denorm_absent;
+  ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
   static constexpr bool has_denorm_loss = false;
   static constexpr float_round_style round_style = round_toward_zero;
   static constexpr bool is_iec559 = false;
@@ -517,7 +515,9 @@
   static constexpr bool has_infinity = false;
   static constexpr bool has_quiet_NaN = false;
   static constexpr bool has_signaling_NaN = false;
+  ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
   static constexpr float_denorm_style has_denorm = denorm_absent;
+  ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
   static constexpr bool has_denorm_loss = false;
   static constexpr float_round_style round_style = round_toward_zero;
   static constexpr bool is_iec559 = false;
@@ -824,6 +824,36 @@
 
 constexpr bool operator>=(uint128 lhs, uint128 rhs) { return !(lhs < rhs); }
 
+#ifdef __cpp_impl_three_way_comparison
+constexpr absl::strong_ordering operator<=>(uint128 lhs, uint128 rhs) {
+#if defined(ABSL_HAVE_INTRINSIC_INT128)
+  if (auto lhs_128 = static_cast<unsigned __int128>(lhs),
+      rhs_128 = static_cast<unsigned __int128>(rhs);
+      lhs_128 < rhs_128) {
+    return absl::strong_ordering::less;
+  } else if (lhs_128 > rhs_128) {
+    return absl::strong_ordering::greater;
+  } else {
+    return absl::strong_ordering::equal;
+  }
+#else
+  if (uint64_t lhs_high = Uint128High64(lhs), rhs_high = Uint128High64(rhs);
+      lhs_high < rhs_high) {
+    return absl::strong_ordering::less;
+  } else if (lhs_high > rhs_high) {
+    return absl::strong_ordering::greater;
+  } else if (uint64_t lhs_low = Uint128Low64(lhs), rhs_low = Uint128Low64(rhs);
+             lhs_low < rhs_low) {
+    return absl::strong_ordering::less;
+  } else if (lhs_low > rhs_low) {
+    return absl::strong_ordering::greater;
+  } else {
+    return absl::strong_ordering::equal;
+  }
+#endif
+}
+#endif
+
 // Unary operators.
 
 constexpr inline uint128 operator+(uint128 val) { return val; }
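
Editor's note: a brief hedged sketch of the new three-way comparison on a C++20 compiler. Comparison against literal 0 is used so the same lines work whether absl::strong_ordering aliases std::strong_ordering or the Abseil fallback type.

#include "absl/numeric/int128.h"

#ifdef __cpp_impl_three_way_comparison
static_assert((absl::uint128{1} <=> absl::uint128{2}) < 0,
              "<=> orders uint128 by value");
static_assert((absl::Uint128Max() <=> absl::uint128{0}) > 0,
              "max compares greater than zero");
static_assert((absl::MakeInt128(-1, 0) <=> absl::MakeInt128(0, 0)) < 0,
              "int128 gets the same operator in the .inc files");
#endif
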
diff --git a/absl/numeric/int128_have_intrinsic.inc b/absl/numeric/int128_have_intrinsic.inc
index 6f1ac64..51e4b9d 100644
--- a/absl/numeric/int128_have_intrinsic.inc
+++ b/absl/numeric/int128_have_intrinsic.inc
@@ -220,6 +220,20 @@
   return static_cast<__int128>(lhs) >= static_cast<__int128>(rhs);
 }
 
+#ifdef __cpp_impl_three_way_comparison
+constexpr absl::strong_ordering operator<=>(int128 lhs, int128 rhs) {
+  if (auto lhs_128 = static_cast<__int128>(lhs),
+      rhs_128 = static_cast<__int128>(rhs);
+      lhs_128 < rhs_128) {
+    return absl::strong_ordering::less;
+  } else if (lhs_128 > rhs_128) {
+    return absl::strong_ordering::greater;
+  } else {
+    return absl::strong_ordering::equal;
+  }
+}
+#endif
+
 // Unary operators.
 
 constexpr int128 operator-(int128 v) { return -static_cast<__int128>(v); }
diff --git a/absl/numeric/int128_no_intrinsic.inc b/absl/numeric/int128_no_intrinsic.inc
index 6f5d837..195b745 100644
--- a/absl/numeric/int128_no_intrinsic.inc
+++ b/absl/numeric/int128_no_intrinsic.inc
@@ -186,6 +186,24 @@
 
 constexpr bool operator>=(int128 lhs, int128 rhs) { return !(lhs < rhs); }
 
+#ifdef __cpp_impl_three_way_comparison
+constexpr absl::strong_ordering operator<=>(int128 lhs, int128 rhs) {
+  if (int64_t lhs_high = Int128High64(lhs), rhs_high = Int128High64(rhs);
+      lhs_high < rhs_high) {
+    return absl::strong_ordering::less;
+  } else if (lhs_high > rhs_high) {
+    return absl::strong_ordering::greater;
+  } else if (uint64_t lhs_low = Uint128Low64(lhs), rhs_low = Uint128Low64(rhs);
+             lhs_low < rhs_low) {
+    return absl::strong_ordering::less;
+  } else if (lhs_low > rhs_low) {
+    return absl::strong_ordering::greater;
+  } else {
+    return absl::strong_ordering::equal;
+  }
+}
+#endif
+
 // Unary operators.
 
 constexpr int128 operator-(int128 v) {
diff --git a/absl/numeric/int128_stream_test.cc b/absl/numeric/int128_stream_test.cc
index bd93784..1b058e1 100644
--- a/absl/numeric/int128_stream_test.cc
+++ b/absl/numeric/int128_stream_test.cc
@@ -135,11 +135,11 @@
                     "2000000000000000000000000000000000000000000"});
   CheckUint128Case({absl::MakeUint128(0x8000000000000000, 0), kHex,
                     /*width = */ 0, "80000000000000000000000000000000"});
-  CheckUint128Case({absl::kuint128max, kDec, /*width = */ 0,
+  CheckUint128Case({absl::Uint128Max(), kDec, /*width = */ 0,
                     "340282366920938463463374607431768211455"});
-  CheckUint128Case({absl::kuint128max, kOct, /*width = */ 0,
+  CheckUint128Case({absl::Uint128Max(), kOct, /*width = */ 0,
                     "3777777777777777777777777777777777777777777"});
-  CheckUint128Case({absl::kuint128max, kHex, /*width = */ 0,
+  CheckUint128Case({absl::Uint128Max(), kHex, /*width = */ 0,
                     "ffffffffffffffffffffffffffffffff"});
 }
 
diff --git a/absl/numeric/int128_test.cc b/absl/numeric/int128_test.cc
index 01e3eb5..3f16e05 100644
--- a/absl/numeric/int128_test.cc
+++ b/absl/numeric/int128_test.cc
@@ -25,12 +25,7 @@
 #include "absl/base/internal/cycleclock.h"
 #include "absl/hash/hash_testing.h"
 #include "absl/meta/type_traits.h"
-
-#if defined(_MSC_VER) && _MSC_VER == 1900
-// Disable "unary minus operator applied to unsigned type" warnings in Microsoft
-// Visual C++ 14 (2015).
-#pragma warning(disable:4146)
-#endif
+#include "absl/types/compare.h"
 
 #define MAKE_INT128(HI, LO) absl::MakeInt128(static_cast<int64_t>(HI), LO)
 
@@ -237,8 +232,6 @@
   EXPECT_EQ(two, -((-one) - 1));
   EXPECT_EQ(absl::Uint128Max(), -one);
   EXPECT_EQ(zero, -zero);
-
-  EXPECT_EQ(absl::Uint128Max(), absl::kuint128max);
 }
 
 TEST(Int128, RightShiftOfNegativeNumbers) {
@@ -792,6 +785,13 @@
     EXPECT_FALSE(pair.smaller >= pair.larger);  // NOLINT(readability/check)
     EXPECT_TRUE(pair.smaller >= pair.smaller);  // NOLINT(readability/check)
     EXPECT_TRUE(pair.larger >= pair.larger);    // NOLINT(readability/check)
+
+#ifdef __cpp_impl_three_way_comparison
+    EXPECT_EQ(pair.smaller <=> pair.larger, absl::strong_ordering::less);
+    EXPECT_EQ(pair.larger <=> pair.smaller, absl::strong_ordering::greater);
+    EXPECT_EQ(pair.smaller <=> pair.smaller, absl::strong_ordering::equal);
+    EXPECT_EQ(pair.larger <=> pair.larger, absl::strong_ordering::equal);
+#endif
   }
 }
 
diff --git a/absl/numeric/internal/bits.h b/absl/numeric/internal/bits.h
index bfef06b..0917464 100644
--- a/absl/numeric/internal/bits.h
+++ b/absl/numeric/internal/bits.h
@@ -167,7 +167,9 @@
 
 ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
 CountLeadingZeroes16(uint16_t x) {
-#if ABSL_HAVE_BUILTIN(__builtin_clzs)
+#if ABSL_HAVE_BUILTIN(__builtin_clzg)
+  return x == 0 ? 16 : __builtin_clzg(x);
+#elif ABSL_HAVE_BUILTIN(__builtin_clzs)
   static_assert(sizeof(unsigned short) == sizeof(x),  // NOLINT(runtime/int)
                 "__builtin_clzs does not take 16-bit arg");
   return x == 0 ? 16 : __builtin_clzs(x);
@@ -303,7 +305,9 @@
 
 ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
 CountTrailingZeroesNonzero16(uint16_t x) {
-#if ABSL_HAVE_BUILTIN(__builtin_ctzs)
+#if ABSL_HAVE_BUILTIN(__builtin_ctzg)
+  return __builtin_ctzg(x);
+#elif ABSL_HAVE_BUILTIN(__builtin_ctzs)
   static_assert(sizeof(unsigned short) == sizeof(x),  // NOLINT(runtime/int)
                 "__builtin_ctzs does not take 16-bit arg");
   return __builtin_ctzs(x);
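
Editor's note: a minimal, self-contained sketch of the detection pattern used above, not the Abseil implementation. It assumes only ABSL_HAVE_BUILTIN from absl/base/config.h; on compilers with none of these builtins it will not build.

#include <cstdint>

#include "absl/base/config.h"  // ABSL_HAVE_BUILTIN

// Preference order as in the patch: the type-generic __builtin_clzg
// (GCC 14+ / newer Clang) first, then the 16-bit __builtin_clzs, then the
// 32-bit builtin with an offset. The explicit zero check keeps every branch
// well-defined; __builtin_clzg also accepts a second "value if zero" argument
// as an alternative to that check.
inline int CountLeadingZeroes16Sketch(uint16_t x) {
#if ABSL_HAVE_BUILTIN(__builtin_clzg)
  return x == 0 ? 16 : __builtin_clzg(x);
#elif ABSL_HAVE_BUILTIN(__builtin_clzs)
  return x == 0 ? 16 : __builtin_clzs(x);
#else
  return x == 0 ? 16 : __builtin_clz(static_cast<uint32_t>(x)) - 16;
#endif
}
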
diff --git a/absl/profiling/BUILD.bazel b/absl/profiling/BUILD.bazel
index 86f205f..abe127e 100644
--- a/absl/profiling/BUILD.bazel
+++ b/absl/profiling/BUILD.bazel
@@ -126,7 +126,7 @@
 
 cc_binary(
     name = "periodic_sampler_benchmark",
-    testonly = 1,
+    testonly = True,
     srcs = ["internal/periodic_sampler_benchmark.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
diff --git a/absl/profiling/internal/periodic_sampler.h b/absl/profiling/internal/periodic_sampler.h
index 54f0af4..f5d847a 100644
--- a/absl/profiling/internal/periodic_sampler.h
+++ b/absl/profiling/internal/periodic_sampler.h
@@ -172,7 +172,7 @@
 // Typical use case:
 //
 //   struct HashTablezTag {};
-//   thread_local PeriodicSampler sampler;
+//   thread_local PeriodicSampler<HashTablezTag, 100> sampler;
 //
 //   void HashTableSamplingLogic(...) {
 //     if (sampler.Sample()) {
diff --git a/absl/random/BUILD.bazel b/absl/random/BUILD.bazel
index 80c4f05..f276cc0 100644
--- a/absl/random/BUILD.bazel
+++ b/absl/random/BUILD.bazel
@@ -108,9 +108,11 @@
     deps = [
         ":seed_gen_exception",
         "//absl/base:config",
+        "//absl/base:nullability",
         "//absl/random/internal:pool_urbg",
         "//absl/random/internal:salted_seed_seq",
         "//absl/random/internal:seed_material",
+        "//absl/strings:string_view",
         "//absl/types:span",
     ],
 )
@@ -132,35 +134,33 @@
 
 cc_library(
     name = "mock_distributions",
-    testonly = 1,
+    testonly = True,
     hdrs = ["mock_distributions.h"],
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         ":distributions",
         ":mocking_bit_gen",
-        "//absl/meta:type_traits",
+        "//absl/base:config",
         "//absl/random/internal:mock_overload_set",
-        "@com_google_googletest//:gtest",
+        "//absl/random/internal:mock_validators",
     ],
 )
 
 cc_library(
     name = "mocking_bit_gen",
-    testonly = 1,
+    testonly = True,
     hdrs = [
         "mocking_bit_gen.h",
     ],
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
-        ":distributions",
         ":random",
+        "//absl/base:config",
+        "//absl/base:core_headers",
         "//absl/base:fast_type_id",
         "//absl/container:flat_hash_map",
         "//absl/meta:type_traits",
-        "//absl/random/internal:distribution_caller",
-        "//absl/strings",
-        "//absl/types:span",
-        "//absl/types:variant",
+        "//absl/random/internal:mock_helpers",
         "//absl/utility",
         "@com_google_googletest//:gtest",
     ],
@@ -221,6 +221,8 @@
     deps = [
         ":distributions",
         ":random",
+        "//absl/meta:type_traits",
+        "//absl/numeric:int128",
         "//absl/random/internal:distribution_test_util",
         "@com_google_googletest//:gtest",
         "@com_google_googletest//:gtest_main",
@@ -479,9 +481,11 @@
         "no_test_wasm",
     ],
     deps = [
+        ":distributions",
         ":mock_distributions",
         ":mocking_bit_gen",
         ":random",
+        "//absl/numeric:int128",
         "@com_google_googletest//:gtest",
         "@com_google_googletest//:gtest_main",
     ],
@@ -521,7 +525,7 @@
 # Benchmarks for various methods / test utilities
 cc_binary(
     name = "benchmarks",
-    testonly = 1,
+    testonly = True,
     srcs = [
         "benchmarks.cc",
     ],
diff --git a/absl/random/CMakeLists.txt b/absl/random/CMakeLists.txt
index bd363d8..ad5477e 100644
--- a/absl/random/CMakeLists.txt
+++ b/absl/random/CMakeLists.txt
@@ -77,6 +77,7 @@
   LINKOPTS
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
+    absl::config
     absl::fast_type_id
     absl::optional
 )
@@ -92,6 +93,7 @@
   LINKOPTS
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
+    absl::config
     absl::random_mocking_bit_gen
     absl::random_internal_mock_helpers
   TESTONLY
@@ -108,17 +110,15 @@
   LINKOPTS
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
+    absl::config
+    absl::core_headers
+    absl::fast_type_id
     absl::flat_hash_map
     absl::raw_logging_internal
-    absl::random_distributions
-    absl::random_internal_distribution_caller
-    absl::random_internal_mock_overload_set
+    absl::random_internal_mock_helpers
     absl::random_random
-    absl::strings
-    absl::span
     absl::type_traits
     absl::utility
-    absl::variant
     GTest::gmock
     GTest::gtest
   PUBLIC
@@ -135,6 +135,7 @@
   LINKOPTS
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
+    absl::random_distributions
     absl::random_mocking_bit_gen
     absl::random_random
     GTest::gmock
@@ -225,11 +226,13 @@
   DEPS
     absl::config
     absl::inlined_vector
+    absl::nullability
     absl::random_internal_pool_urbg
     absl::random_internal_salted_seed_seq
     absl::random_internal_seed_material
     absl::random_seed_gen_exception
     absl::span
+    absl::string_view
 )
 
 absl_cc_test(
@@ -285,6 +288,8 @@
   DEPS
     absl::random_distributions
     absl::random_random
+    absl::type_traits
+    absl::int128
     absl::random_internal_distribution_test_util
     GTest::gmock
     GTest::gtest_main
@@ -1171,6 +1176,26 @@
 )
 
 # Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    random_internal_mock_validators
+  HDRS
+    "internal/mock_validators.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::random_internal_iostream_state_saver
+    absl::random_internal_uniform_helper
+    absl::config
+    absl::raw_logging_internal
+    absl::strings
+    absl::string_view
+  TESTONLY
+)
+
+# Internal-only target, do not depend on directly.
 absl_cc_test(
   NAME
     random_internal_uniform_helper_test
@@ -1183,6 +1208,7 @@
   DEPS
     absl::random_internal_uniform_helper
     GTest::gtest_main
+    absl::int128
 )
 
 # Internal-only target, do not depend on directly.
diff --git a/absl/random/benchmarks.cc b/absl/random/benchmarks.cc
index 0900e81..26bc95e 100644
--- a/absl/random/benchmarks.cc
+++ b/absl/random/benchmarks.cc
@@ -291,7 +291,7 @@
   BENCHMARK_TEMPLATE(BM_Shuffle, Engine, 100)->ThreadPerCpu();      \
   BENCHMARK_TEMPLATE(BM_Shuffle, Engine, 1000)->ThreadPerCpu();     \
   BENCHMARK_TEMPLATE(BM_ShuffleReuse, Engine, 100)->ThreadPerCpu(); \
-  BENCHMARK_TEMPLATE(BM_ShuffleReuse, Engine, 1000)->ThreadPerCpu();
+  BENCHMARK_TEMPLATE(BM_ShuffleReuse, Engine, 1000)->ThreadPerCpu()
 
 #define BM_EXTENDED(Engine)                                                    \
   /* -------------- Extended Uniform -----------------------*/                 \
@@ -355,7 +355,7 @@
   BENCHMARK_TEMPLATE(BM_Beta, Engine, absl::beta_distribution<float>, 410,     \
                      580);                                                     \
   BENCHMARK_TEMPLATE(BM_Gamma, Engine, std::gamma_distribution<float>, 199);   \
-  BENCHMARK_TEMPLATE(BM_Gamma, Engine, std::gamma_distribution<double>, 199);
+  BENCHMARK_TEMPLATE(BM_Gamma, Engine, std::gamma_distribution<double>, 199)
 
 // ABSL Recommended interfaces.
 BM_BASIC(absl::InsecureBitGen);  // === pcg64_2018_engine
diff --git a/absl/random/beta_distribution.h b/absl/random/beta_distribution.h
index c154066..432c516 100644
--- a/absl/random/beta_distribution.h
+++ b/absl/random/beta_distribution.h
@@ -181,18 +181,18 @@
     result_type alpha_;
     result_type beta_;
 
-    result_type a_;  // the smaller of {alpha, beta}, or 1.0/alpha_ in JOEHNK
-    result_type b_;  // the larger of {alpha, beta}, or 1.0/beta_ in JOEHNK
-    result_type x_;  // alpha + beta, or the result in degenerate cases
-    result_type log_x_;  // log(x_)
-    result_type y_;      // "beta" in Cheng
-    result_type gamma_;  // "gamma" in Cheng
+    result_type a_{};  // the smaller of {alpha, beta}, or 1.0/alpha_ in JOEHNK
+    result_type b_{};  // the larger of {alpha, beta}, or 1.0/beta_ in JOEHNK
+    result_type x_{};  // alpha + beta, or the result in degenerate cases
+    result_type log_x_{};  // log(x_)
+    result_type y_{};      // "beta" in Cheng
+    result_type gamma_{};  // "gamma" in Cheng
 
-    Method method_;
+    Method method_{};
 
     // Placing this last for optimal alignment.
     // Whether alpha_ != a_, i.e. true iff alpha_ > beta_.
-    bool inverted_;
+    bool inverted_{};
 
     static_assert(std::is_floating_point<RealType>::value,
                   "Class-template absl::beta_distribution<> must be "
diff --git a/absl/random/bit_gen_ref.h b/absl/random/bit_gen_ref.h
index e475221..ac26d9d 100644
--- a/absl/random/bit_gen_ref.h
+++ b/absl/random/bit_gen_ref.h
@@ -28,6 +28,7 @@
 #include <type_traits>
 #include <utility>
 
+#include "absl/base/attributes.h"
 #include "absl/base/internal/fast_type_id.h"
 #include "absl/base/macros.h"
 #include "absl/meta/type_traits.h"
@@ -110,20 +111,21 @@
   BitGenRef& operator=(const BitGenRef&) = default;
   BitGenRef& operator=(BitGenRef&&) = default;
 
-  template <typename URBG, typename absl::enable_if_t<
-                               (!std::is_same<URBG, BitGenRef>::value &&
-                                random_internal::is_urbg<URBG>::value &&
-                                !HasInvokeMock<URBG>::value)>* = nullptr>
-  BitGenRef(URBG& gen)  // NOLINT
+  template <
+      typename URBGRef, typename URBG = absl::remove_cvref_t<URBGRef>,
+      typename absl::enable_if_t<(!std::is_same<URBG, BitGenRef>::value &&
+                                  random_internal::is_urbg<URBG>::value &&
+                                  !HasInvokeMock<URBG>::value)>* = nullptr>
+  BitGenRef(URBGRef&& gen ABSL_ATTRIBUTE_LIFETIME_BOUND)  // NOLINT
       : t_erased_gen_ptr_(reinterpret_cast<uintptr_t>(&gen)),
         mock_call_(NotAMock),
         generate_impl_fn_(ImplFn<URBG>) {}
 
-  template <typename URBG,
+  template <typename URBGRef, typename URBG = absl::remove_cvref_t<URBGRef>,
             typename absl::enable_if_t<(!std::is_same<URBG, BitGenRef>::value &&
                                         random_internal::is_urbg<URBG>::value &&
                                         HasInvokeMock<URBG>::value)>* = nullptr>
-  BitGenRef(URBG& gen)  // NOLINT
+  BitGenRef(URBGRef&& gen ABSL_ATTRIBUTE_LIFETIME_BOUND)  // NOLINT
       : t_erased_gen_ptr_(reinterpret_cast<uintptr_t>(&gen)),
         mock_call_(&MockCall<URBG>),
         generate_impl_fn_(ImplFn<URBG>) {}
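
Editor's note: the constructors above now take forwarding references annotated with ABSL_ATTRIBUTE_LIFETIME_BOUND. As a hedged, generic sketch of what that attribute does (independent of BitGenRef's own semantics; FirstChar is an invented function):

#include <string>

#include "absl/base/attributes.h"
#include "absl/strings/string_view.h"

// The returned view refers into `s`; the annotation lets Clang flag callers
// that bind the result to something outliving the argument.
inline absl::string_view FirstChar(
    const std::string& s ABSL_ATTRIBUTE_LIFETIME_BOUND) {
  return absl::string_view(s).substr(0, 1);
}

// absl::string_view dangling = FirstChar(std::string("tmp"));
//   ^ with the annotation, Clang can warn that `dangling` refers to a
//     destroyed temporary; without it, the bug is silent.
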
diff --git a/absl/random/discrete_distribution_test.cc b/absl/random/discrete_distribution_test.cc
index 32405ea..f82ef84 100644
--- a/absl/random/discrete_distribution_test.cc
+++ b/absl/random/discrete_distribution_test.cc
@@ -200,7 +200,7 @@
 }
 
 TEST(DiscreteDistributionTest, StabilityTest) {
-  // absl::discrete_distribution stabilitiy relies on
+  // absl::discrete_distribution stability relies on
   // absl::uniform_int_distribution and absl::bernoulli_distribution.
   absl::random_internal::sequence_urbg urbg(
       {0x0003eb76f6f7f755ull, 0xFFCEA50FDB2F953Bull, 0xC332DDEFBE6C5AA5ull,
diff --git a/absl/random/distributions.h b/absl/random/distributions.h
index 4e3b332..b6ade68 100644
--- a/absl/random/distributions.h
+++ b/absl/random/distributions.h
@@ -32,8 +32,8 @@
 //     continuously and independently at a constant average rate
 //   * `absl::Gaussian` (also known as "normal distributions") for continuous
 //     distributions using an associated quadratic function
-//   * `absl::LogUniform` for continuous uniform distributions where the log
-//     to the given base of all values is uniform
+//   * `absl::LogUniform` for discrete distributions where the log to the given
+//     base of all values is uniform
 //   * `absl::Poisson` for discrete probability distributions that express the
 //     probability of a given number of events occurring within a fixed interval
 //   * `absl::Zipf` for discrete probability distributions commonly used for
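
Editor's note: the `absl::LogUniform` entry above now correctly calls it a discrete distribution. A hedged usage sketch (API per current Abseil headers; the bounds and base here are arbitrary):

#include <cstdint>

#include "absl/random/distributions.h"
#include "absl/random/random.h"

int64_t PickBufferSize() {
  absl::BitGen gen;
  // Each base-2 "order of magnitude" in [0, 1 << 20] is roughly equally
  // likely, and a value is then drawn uniformly within the chosen bucket.
  return absl::LogUniform<int64_t>(gen, 0, int64_t{1} << 20, 2);
}
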
@@ -46,23 +46,23 @@
 #ifndef ABSL_RANDOM_DISTRIBUTIONS_H_
 #define ABSL_RANDOM_DISTRIBUTIONS_H_
 
-#include <algorithm>
-#include <cmath>
 #include <limits>
-#include <random>
 #include <type_traits>
 
+#include "absl/base/config.h"
 #include "absl/base/internal/inline_variable.h"
+#include "absl/meta/type_traits.h"
 #include "absl/random/bernoulli_distribution.h"
 #include "absl/random/beta_distribution.h"
 #include "absl/random/exponential_distribution.h"
 #include "absl/random/gaussian_distribution.h"
 #include "absl/random/internal/distribution_caller.h"  // IWYU pragma: export
+#include "absl/random/internal/traits.h"
 #include "absl/random/internal/uniform_helper.h"  // IWYU pragma: export
 #include "absl/random/log_uniform_int_distribution.h"
 #include "absl/random/poisson_distribution.h"
-#include "absl/random/uniform_int_distribution.h"
-#include "absl/random/uniform_real_distribution.h"
+#include "absl/random/uniform_int_distribution.h"  // IWYU pragma: export
+#include "absl/random/uniform_real_distribution.h"  // IWYU pragma: export
 #include "absl/random/zipf_distribution.h"
 
 namespace absl {
@@ -176,7 +176,7 @@
 
   return random_internal::DistributionCaller<gen_t>::template Call<
       distribution_t>(&urbg, tag, static_cast<return_t>(lo),
-                                static_cast<return_t>(hi));
+                      static_cast<return_t>(hi));
 }
 
 // absl::Uniform(bitgen, lo, hi)
@@ -200,7 +200,7 @@
 
   return random_internal::DistributionCaller<gen_t>::template Call<
       distribution_t>(&urbg, static_cast<return_t>(lo),
-                                static_cast<return_t>(hi));
+                      static_cast<return_t>(hi));
 }
 
 // absl::Uniform<unsigned T>(bitgen)
@@ -208,7 +208,7 @@
 // Overload of Uniform() using the minimum and maximum values of a given type
 // `T` (which must be unsigned), returning a value of type `unsigned T`
 template <typename R, typename URBG>
-typename absl::enable_if_t<!std::is_signed<R>::value, R>  //
+typename absl::enable_if_t<!std::numeric_limits<R>::is_signed, R>  //
 Uniform(URBG&& urbg) {  // NOLINT(runtime/references)
   using gen_t = absl::decay_t<URBG>;
   using distribution_t = random_internal::UniformDistributionWrapper<R>;
@@ -362,7 +362,7 @@
 // If `lo` is nonzero then this distribution is shifted to the desired interval,
 // so LogUniform(lo, hi, b) is equivalent to LogUniform(0, hi-lo, b)+lo.
 //
-// See https://en.wikipedia.org/wiki/Log-normal_distribution
+// See https://en.wikipedia.org/wiki/Reciprocal_distribution
 //
 // Example:
 //
diff --git a/absl/random/distributions_test.cc b/absl/random/distributions_test.cc
index 5321a11..ea32183 100644
--- a/absl/random/distributions_test.cc
+++ b/absl/random/distributions_test.cc
@@ -17,10 +17,14 @@
 #include <cfloat>
 #include <cmath>
 #include <cstdint>
-#include <random>
+#include <limits>
+#include <type_traits>
+#include <utility>
 #include <vector>
 
 #include "gtest/gtest.h"
+#include "absl/meta/type_traits.h"
+#include "absl/numeric/int128.h"
 #include "absl/random/internal/distribution_test_util.h"
 #include "absl/random/random.h"
 
@@ -30,7 +34,6 @@
 
 class RandomDistributionsTest : public testing::Test {};
 
-
 struct Invalid {};
 
 template <typename A, typename B>
@@ -93,17 +96,18 @@
 }
 
 template <typename A, typename B, typename ExplicitRet>
-auto ExplicitUniformReturnT(int) -> decltype(
-    absl::Uniform<ExplicitRet>(*std::declval<absl::InsecureBitGen*>(),
-                               std::declval<A>(), std::declval<B>()));
+auto ExplicitUniformReturnT(int) -> decltype(absl::Uniform<ExplicitRet>(
+                                     std::declval<absl::InsecureBitGen&>(),
+                                     std::declval<A>(), std::declval<B>()));
 
 template <typename, typename, typename ExplicitRet>
 Invalid ExplicitUniformReturnT(...);
 
 template <typename TagType, typename A, typename B, typename ExplicitRet>
-auto ExplicitTaggedUniformReturnT(int) -> decltype(absl::Uniform<ExplicitRet>(
-    std::declval<TagType>(), *std::declval<absl::InsecureBitGen*>(),
-    std::declval<A>(), std::declval<B>()));
+auto ExplicitTaggedUniformReturnT(int)
+    -> decltype(absl::Uniform<ExplicitRet>(
+        std::declval<TagType>(), std::declval<absl::InsecureBitGen&>(),
+        std::declval<A>(), std::declval<B>()));
 
 template <typename, typename, typename, typename ExplicitRet>
 Invalid ExplicitTaggedUniformReturnT(...);
@@ -135,6 +139,14 @@
       "");
 }
 
+// Takes the type of `absl::Uniform<R>(gen)` if valid or `Invalid` otherwise.
+template <typename R>
+auto UniformNoBoundsReturnT(int)
+    -> decltype(absl::Uniform<R>(std::declval<absl::InsecureBitGen&>()));
+
+template <typename>
+Invalid UniformNoBoundsReturnT(...);
+
 TEST_F(RandomDistributionsTest, UniformTypeInference) {
   // Infers common types.
   CheckArgsInferType<uint16_t, uint16_t, uint16_t>();
@@ -221,6 +233,38 @@
   absl::Uniform<uint32_t>(gen);
   absl::Uniform<uint64_t>(gen);
   absl::Uniform<absl::uint128>(gen);
+
+  // Compile-time validity tests.
+
+  // Allows unsigned ints.
+  testing::StaticAssertTypeEq<uint8_t,
+                              decltype(UniformNoBoundsReturnT<uint8_t>(0))>();
+  testing::StaticAssertTypeEq<uint16_t,
+                              decltype(UniformNoBoundsReturnT<uint16_t>(0))>();
+  testing::StaticAssertTypeEq<uint32_t,
+                              decltype(UniformNoBoundsReturnT<uint32_t>(0))>();
+  testing::StaticAssertTypeEq<uint64_t,
+                              decltype(UniformNoBoundsReturnT<uint64_t>(0))>();
+  testing::StaticAssertTypeEq<
+      absl::uint128, decltype(UniformNoBoundsReturnT<absl::uint128>(0))>();
+
+  // Disallows signed ints.
+  testing::StaticAssertTypeEq<Invalid,
+                              decltype(UniformNoBoundsReturnT<int8_t>(0))>();
+  testing::StaticAssertTypeEq<Invalid,
+                              decltype(UniformNoBoundsReturnT<int16_t>(0))>();
+  testing::StaticAssertTypeEq<Invalid,
+                              decltype(UniformNoBoundsReturnT<int32_t>(0))>();
+  testing::StaticAssertTypeEq<Invalid,
+                              decltype(UniformNoBoundsReturnT<int64_t>(0))>();
+  testing::StaticAssertTypeEq<
+      Invalid, decltype(UniformNoBoundsReturnT<absl::int128>(0))>();
+
+  // Disallows float types.
+  testing::StaticAssertTypeEq<Invalid,
+                              decltype(UniformNoBoundsReturnT<float>(0))>();
+  testing::StaticAssertTypeEq<Invalid,
+                              decltype(UniformNoBoundsReturnT<double>(0))>();
 }
 
 TEST_F(RandomDistributionsTest, UniformNonsenseRanges) {
diff --git a/absl/random/internal/BUILD.bazel b/absl/random/internal/BUILD.bazel
index 71a742e..5e05130 100644
--- a/absl/random/internal/BUILD.bazel
+++ b/absl/random/internal/BUILD.bazel
@@ -137,7 +137,7 @@
 
 cc_library(
     name = "explicit_seed_seq",
-    testonly = 1,
+    testonly = True,
     hdrs = [
         "explicit_seed_seq.h",
     ],
@@ -151,7 +151,7 @@
 
 cc_library(
     name = "sequence_urbg",
-    testonly = 1,
+    testonly = True,
     hdrs = [
         "sequence_urbg.h",
     ],
@@ -375,7 +375,7 @@
 
 cc_library(
     name = "distribution_test_util",
-    testonly = 1,
+    testonly = True,
     srcs = [
         "chi_square.cc",
         "distribution_test_util.cc",
@@ -527,6 +527,7 @@
     hdrs = ["mock_helpers.h"],
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
+        "//absl/base:config",
         "//absl/base:fast_type_id",
         "//absl/types:optional",
     ],
@@ -534,11 +535,12 @@
 
 cc_library(
     name = "mock_overload_set",
-    testonly = 1,
+    testonly = True,
     hdrs = ["mock_overload_set.h"],
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         ":mock_helpers",
+        "//absl/base:config",
         "//absl/random:mocking_bit_gen",
         "@com_google_googletest//:gtest",
     ],
@@ -712,7 +714,19 @@
         ":traits",
         "//absl/base:config",
         "//absl/meta:type_traits",
-        "//absl/numeric:int128",
+    ],
+)
+
+cc_library(
+    name = "mock_validators",
+    hdrs = ["mock_validators.h"],
+    deps = [
+        ":iostream_state_saver",
+        ":uniform_helper",
+        "//absl/base:config",
+        "//absl/base:raw_logging_internal",
+        "//absl/strings",
+        "//absl/strings:string_view",
     ],
 )
 
diff --git a/absl/random/internal/mock_helpers.h b/absl/random/internal/mock_helpers.h
index a7a97bf..19d0561 100644
--- a/absl/random/internal/mock_helpers.h
+++ b/absl/random/internal/mock_helpers.h
@@ -16,10 +16,9 @@
 #ifndef ABSL_RANDOM_INTERNAL_MOCK_HELPERS_H_
 #define ABSL_RANDOM_INTERNAL_MOCK_HELPERS_H_
 
-#include <tuple>
-#include <type_traits>
 #include <utility>
 
+#include "absl/base/config.h"
 #include "absl/base/internal/fast_type_id.h"
 #include "absl/types/optional.h"
 
@@ -27,6 +26,16 @@
 ABSL_NAMESPACE_BEGIN
 namespace random_internal {
 
+// A no-op validator meeting the ValidatorT requirements for MockHelpers.
+//
+// Custom validators should follow a similar structure, passing an instance of
+// the validator to MockHelpers::MockFor<KeyT>(m, CustomValidatorT()).
+struct NoOpValidator {
+  // Default validation: do nothing.
+  template <typename ResultT, typename... Args>
+  static void Validate(ResultT, Args&&...) {}
+};
+
 // MockHelpers works in conjunction with MockOverloadSet, MockingBitGen, and
 // BitGenRef to enable the mocking capability for absl distribution functions.
 //
@@ -109,6 +118,29 @@
         0, urbg, std::forward<Args>(args)...);
   }
 
+  // Acquire a mock for the KeyT (may or may not be a signature), set up to use
+  // the ValidatorT to verify that the result is in the range of the RNG
+  // function.
+  //
+  // KeyT is used to generate a typeid-based lookup for the mock.
+  // KeyT is a signature of the form:
+  //   result_type(discriminator_type, std::tuple<args...>)
+  // The mocked function signature will be composed from KeyT as:
+  //   result_type(args...)
+  // ValidatorT::Validate will be called with the result produced by the RNG
+  //   function. The signature is expected to be of the form:
+  //      ValidatorT::Validate(result, args...)
+  template <typename KeyT, typename ValidatorT, typename MockURBG>
+  static auto MockFor(MockURBG& m, ValidatorT)
+      -> decltype(m.template RegisterMock<
+                  typename KeySignature<KeyT>::result_type,
+                  typename KeySignature<KeyT>::arg_tuple_type>(
+          m, std::declval<IdType>(), ValidatorT())) {
+    return m.template RegisterMock<typename KeySignature<KeyT>::result_type,
+                                   typename KeySignature<KeyT>::arg_tuple_type>(
+        m, ::absl::base_internal::FastTypeId<KeyT>(), ValidatorT());
+  }
+
   // Acquire a mock for the KeyT (may or may not be a signature).
   //
   // KeyT is used to generate a typeid-based lookup for the mock.
@@ -117,14 +149,8 @@
   // The mocked function signature will be composed from KeyT as:
   //   result_type(args...)
   template <typename KeyT, typename MockURBG>
-  static auto MockFor(MockURBG& m)
-      -> decltype(m.template RegisterMock<
-                  typename KeySignature<KeyT>::result_type,
-                  typename KeySignature<KeyT>::arg_tuple_type>(
-          m, std::declval<IdType>())) {
-    return m.template RegisterMock<typename KeySignature<KeyT>::result_type,
-                                   typename KeySignature<KeyT>::arg_tuple_type>(
-        m, ::absl::base_internal::FastTypeId<KeyT>());
+  static decltype(auto) MockFor(MockURBG& m) {
+    return MockFor<KeyT>(m, NoOpValidator());
   }
 };
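
As the NoOpValidator comment notes, a custom validator only needs a static Validate(result, args...) member callable with the mocked result followed by the original call arguments. A minimal sketch under that assumption; NonNegativeValidator and KeyT below are illustrative, not part of this patch:

#include <cassert>

// Sketch of a validator satisfying the ValidatorT requirements stated above:
// Validate() receives the mocked result first, then the original call
// arguments, and is expected to assert or crash on out-of-contract values.
struct NonNegativeValidator {
  template <typename ResultT, typename... Args>
  static void Validate(ResultT result, Args&&...) {
    assert(result >= ResultT{0});  // reject negative mocked results
  }
};

// Hypothetical use with the overload above (KeyT as described in the comment):
//   auto& mock_fn = absl::random_internal::MockHelpers::MockFor<KeyT>(
//       gen, NonNegativeValidator());
//   EXPECT_CALL(mock_fn, Call(::testing::_)).WillOnce(::testing::Return(7));
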
 
diff --git a/absl/random/internal/mock_overload_set.h b/absl/random/internal/mock_overload_set.h
index 0d9c6c1..cfaeeee 100644
--- a/absl/random/internal/mock_overload_set.h
+++ b/absl/random/internal/mock_overload_set.h
@@ -16,9 +16,11 @@
 #ifndef ABSL_RANDOM_INTERNAL_MOCK_OVERLOAD_SET_H_
 #define ABSL_RANDOM_INTERNAL_MOCK_OVERLOAD_SET_H_
 
+#include <tuple>
 #include <type_traits>
 
 #include "gmock/gmock.h"
+#include "absl/base/config.h"
 #include "absl/random/internal/mock_helpers.h"
 #include "absl/random/mocking_bit_gen.h"
 
@@ -26,7 +28,7 @@
 ABSL_NAMESPACE_BEGIN
 namespace random_internal {
 
-template <typename DistrT, typename Fn>
+template <typename DistrT, typename ValidatorT, typename Fn>
 struct MockSingleOverload;
 
 // MockSingleOverload
@@ -38,8 +40,8 @@
 // arguments to MockingBitGen::Register.
 //
 // The underlying KeyT must match the KeyT constructed by DistributionCaller.
-template <typename DistrT, typename Ret, typename... Args>
-struct MockSingleOverload<DistrT, Ret(MockingBitGen&, Args...)> {
+template <typename DistrT, typename ValidatorT, typename Ret, typename... Args>
+struct MockSingleOverload<DistrT, ValidatorT, Ret(MockingBitGen&, Args...)> {
   static_assert(std::is_same<typename DistrT::result_type, Ret>::value,
                 "Overload signature must have return type matching the "
                 "distribution result_type.");
@@ -47,15 +49,21 @@
 
   template <typename MockURBG>
   auto gmock_Call(MockURBG& gen, const ::testing::Matcher<Args>&... matchers)
-      -> decltype(MockHelpers::MockFor<KeyT>(gen).gmock_Call(matchers...)) {
-    static_assert(std::is_base_of<MockingBitGen, MockURBG>::value,
-                  "Mocking requires an absl::MockingBitGen");
-    return MockHelpers::MockFor<KeyT>(gen).gmock_Call(matchers...);
+      -> decltype(MockHelpers::MockFor<KeyT>(gen, ValidatorT())
+                      .gmock_Call(matchers...)) {
+    static_assert(
+        std::is_base_of<MockingBitGenImpl<true>, MockURBG>::value ||
+            std::is_base_of<MockingBitGenImpl<false>, MockURBG>::value,
+        "Mocking requires an absl::MockingBitGen");
+    return MockHelpers::MockFor<KeyT>(gen, ValidatorT())
+        .gmock_Call(matchers...);
   }
 };
 
-template <typename DistrT, typename Ret, typename Arg, typename... Args>
-struct MockSingleOverload<DistrT, Ret(Arg, MockingBitGen&, Args...)> {
+template <typename DistrT, typename ValidatorT, typename Ret, typename Arg,
+          typename... Args>
+struct MockSingleOverload<DistrT, ValidatorT,
+                          Ret(Arg, MockingBitGen&, Args...)> {
   static_assert(std::is_same<typename DistrT::result_type, Ret>::value,
                 "Overload signature must have return type matching the "
                 "distribution result_type.");
@@ -64,14 +72,44 @@
   template <typename MockURBG>
   auto gmock_Call(const ::testing::Matcher<Arg>& matcher, MockURBG& gen,
                   const ::testing::Matcher<Args>&... matchers)
-      -> decltype(MockHelpers::MockFor<KeyT>(gen).gmock_Call(matcher,
-                                                             matchers...)) {
-    static_assert(std::is_base_of<MockingBitGen, MockURBG>::value,
-                  "Mocking requires an absl::MockingBitGen");
-    return MockHelpers::MockFor<KeyT>(gen).gmock_Call(matcher, matchers...);
+      -> decltype(MockHelpers::MockFor<KeyT>(gen, ValidatorT())
+                      .gmock_Call(matcher, matchers...)) {
+    static_assert(
+        std::is_base_of<MockingBitGenImpl<true>, MockURBG>::value ||
+            std::is_base_of<MockingBitGenImpl<false>, MockURBG>::value,
+        "Mocking requires an absl::MockingBitGen");
+    return MockHelpers::MockFor<KeyT>(gen, ValidatorT())
+        .gmock_Call(matcher, matchers...);
   }
 };
 
+// MockOverloadSetWithValidator
+//
+// MockOverloadSetWithValidator is a generalization of MockOverloadSet that
+// takes an additional ValidatorT parameter, allowing customization of the mock
+// behavior.
+//
+// `ValidatorT::Validate(result, args...)` will be called after the mock
+// distribution returns a value in `result`, allowing for validation against the
+// args.
+template <typename DistrT, typename ValidatorT, typename... Fns>
+struct MockOverloadSetWithValidator;
+
+template <typename DistrT, typename ValidatorT, typename Sig>
+struct MockOverloadSetWithValidator<DistrT, ValidatorT, Sig>
+    : public MockSingleOverload<DistrT, ValidatorT, Sig> {
+  using MockSingleOverload<DistrT, ValidatorT, Sig>::gmock_Call;
+};
+
+template <typename DistrT, typename ValidatorT, typename FirstSig,
+          typename... Rest>
+struct MockOverloadSetWithValidator<DistrT, ValidatorT, FirstSig, Rest...>
+    : public MockSingleOverload<DistrT, ValidatorT, FirstSig>,
+      public MockOverloadSetWithValidator<DistrT, ValidatorT, Rest...> {
+  using MockSingleOverload<DistrT, ValidatorT, FirstSig>::gmock_Call;
+  using MockOverloadSetWithValidator<DistrT, ValidatorT, Rest...>::gmock_Call;
+};
+
 // MockOverloadSet
 //
 // MockOverloadSet takes a distribution and a collection of signatures and
@@ -79,20 +117,8 @@
 // `EXPECT_CALL(mock_overload_set, Call(...))` expand and do overload resolution
 // correctly.
 template <typename DistrT, typename... Signatures>
-struct MockOverloadSet;
-
-template <typename DistrT, typename Sig>
-struct MockOverloadSet<DistrT, Sig> : public MockSingleOverload<DistrT, Sig> {
-  using MockSingleOverload<DistrT, Sig>::gmock_Call;
-};
-
-template <typename DistrT, typename FirstSig, typename... Rest>
-struct MockOverloadSet<DistrT, FirstSig, Rest...>
-    : public MockSingleOverload<DistrT, FirstSig>,
-      public MockOverloadSet<DistrT, Rest...> {
-  using MockSingleOverload<DistrT, FirstSig>::gmock_Call;
-  using MockOverloadSet<DistrT, Rest...>::gmock_Call;
-};
+using MockOverloadSet =
+    MockOverloadSetWithValidator<DistrT, NoOpValidator, Signatures...>;
 
 }  // namespace random_internal
 ABSL_NAMESPACE_END
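
The recursive inheritance in MockOverloadSetWithValidator is the usual way to merge several gmock_Call overloads into one overload set via chained using-declarations. A stripped-down sketch of the same shape with plain functions (all names illustrative):

#include <string>

// Each "single overload" contributes one Call() signature, the way
// MockSingleOverload contributes one gmock_Call().
template <typename Sig>
struct Single;

template <>
struct Single<int(int)> {
  int Call(int x) { return x; }
};

template <>
struct Single<std::string(const char*)> {
  std::string Call(const char* s) { return s; }
};

// The set inherits every Single<Sig> and re-exports Call with `using`, so
// overload resolution sees all signatures at once -- the same shape as
// MockOverloadSetWithValidator's `using ...::gmock_Call;` chain.
template <typename... Sigs>
struct OverloadSet;

template <typename Sig>
struct OverloadSet<Sig> : Single<Sig> {
  using Single<Sig>::Call;
};

template <typename First, typename... Rest>
struct OverloadSet<First, Rest...> : Single<First>, OverloadSet<Rest...> {
  using Single<First>::Call;
  using OverloadSet<Rest...>::Call;
};

int main() {
  OverloadSet<int(int), std::string(const char*)> set;
  return set.Call(41) + 1 == 42 && set.Call("ok") == "ok" ? 0 : 1;
}
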
diff --git a/absl/random/internal/mock_validators.h b/absl/random/internal/mock_validators.h
new file mode 100644
index 0000000..d76d169
--- /dev/null
+++ b/absl/random/internal/mock_validators.h
@@ -0,0 +1,98 @@
+// Copyright 2024 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_RANDOM_INTERNAL_MOCK_VALIDATORS_H_
+#define ABSL_RANDOM_INTERNAL_MOCK_VALIDATORS_H_
+
+#include <type_traits>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/random/internal/iostream_state_saver.h"
+#include "absl/random/internal/uniform_helper.h"
+#include "absl/strings/str_cat.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace random_internal {
+
+template <typename NumType>
+class UniformDistributionValidator {
+ public:
+  // Handle absl::Uniform<NumType>(gen, absl::IntervalTag, lo, hi).
+  template <typename TagType>
+  static void Validate(NumType x, TagType tag, NumType lo, NumType hi) {
+    // For invalid ranges, absl::Uniform() simply returns one of the bounds.
+    if (x == lo && lo == hi) return;
+
+    ValidateImpl(std::is_floating_point<NumType>{}, x, tag, lo, hi);
+  }
+
+  // Handle absl::Uniform<NumType>(gen, lo, hi).
+  static void Validate(NumType x, NumType lo, NumType hi) {
+    Validate(x, IntervalClosedOpenTag(), lo, hi);
+  }
+
+  // Handle absl::Uniform<NumType>(gen).
+  static void Validate(NumType) {
+    // absl::Uniform<NumType>(gen) spans the entire range of `NumType`, so any
+    // value is okay. This overload exists because the validation logic attempts
+    // to call it anyway rather than adding extra SFINAE.
+  }
+
+ private:
+  static absl::string_view TagLbBound(IntervalClosedOpenTag) { return "["; }
+  static absl::string_view TagLbBound(IntervalOpenOpenTag) { return "("; }
+  static absl::string_view TagLbBound(IntervalClosedClosedTag) { return "["; }
+  static absl::string_view TagLbBound(IntervalOpenClosedTag) { return "("; }
+  static absl::string_view TagUbBound(IntervalClosedOpenTag) { return ")"; }
+  static absl::string_view TagUbBound(IntervalOpenOpenTag) { return ")"; }
+  static absl::string_view TagUbBound(IntervalClosedClosedTag) { return "]"; }
+  static absl::string_view TagUbBound(IntervalOpenClosedTag) { return "]"; }
+
+  template <typename TagType>
+  static void ValidateImpl(std::true_type /* is_floating_point */, NumType x,
+                           TagType tag, NumType lo, NumType hi) {
+    UniformDistributionWrapper<NumType> dist(tag, lo, hi);
+    NumType lb = dist.a();
+    NumType ub = dist.b();
+    // uniform_real_distribution is always closed-open, so the upper bound is
+    // always non-inclusive.
+    ABSL_INTERNAL_CHECK(lb <= x && x < ub,
+                        absl::StrCat(x, " is not in ", TagLbBound(tag), lo,
+                                     ", ", hi, TagUbBound(tag)));
+  }
+
+  template <typename TagType>
+  static void ValidateImpl(std::false_type /* is_floating_point */, NumType x,
+                           TagType tag, NumType lo, NumType hi) {
+    using stream_type =
+        typename random_internal::stream_format_type<NumType>::type;
+
+    UniformDistributionWrapper<NumType> dist(tag, lo, hi);
+    NumType lb = dist.a();
+    NumType ub = dist.b();
+    ABSL_INTERNAL_CHECK(
+        lb <= x && x <= ub,
+        absl::StrCat(stream_type{x}, " is not in ", TagLbBound(tag),
+                     stream_type{lo}, ", ", stream_type{hi}, TagUbBound(tag)));
+  }
+};
+
+}  // namespace random_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_RANDOM_INTERNAL_MOCK_VALIDATORS_H_
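
ValidateImpl above picks the floating-point or integral bound check at compile time by tag-dispatching on std::is_floating_point; the real validator additionally folds the interval tag into the bounds via UniformDistributionWrapper. The dispatch idiom in isolation, as a rough sketch with illustrative names:

#include <iostream>
#include <type_traits>

// Floating-point path: the realized distribution is closed-open, matching
// ValidateImpl(std::true_type, ...) above.
template <typename T>
bool InRangeImpl(std::true_type /* is_floating_point */, T x, T lo, T hi) {
  return lo <= x && x < hi;
}

// Integral path: both bounds may be attainable once the interval tag has been
// folded into lo/hi, matching ValidateImpl(std::false_type, ...).
template <typename T>
bool InRangeImpl(std::false_type /* is_floating_point */, T x, T lo, T hi) {
  return lo <= x && x <= hi;
}

template <typename T>
bool InRange(T x, T lo, T hi) {
  return InRangeImpl(std::is_floating_point<T>{}, x, lo, hi);
}

int main() {
  std::cout << InRange(10.0, 1.0, 10.0) << "\n";  // 0: upper bound excluded
  std::cout << InRange(10, 1, 10) << "\n";        // 1: upper bound included
}
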
diff --git a/absl/random/mock_distributions.h b/absl/random/mock_distributions.h
index 764ab37..b379262 100644
--- a/absl/random/mock_distributions.h
+++ b/absl/random/mock_distributions.h
@@ -46,16 +46,18 @@
 #ifndef ABSL_RANDOM_MOCK_DISTRIBUTIONS_H_
 #define ABSL_RANDOM_MOCK_DISTRIBUTIONS_H_
 
-#include <limits>
-#include <type_traits>
-#include <utility>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/meta/type_traits.h"
+#include "absl/base/config.h"
+#include "absl/random/bernoulli_distribution.h"
+#include "absl/random/beta_distribution.h"
 #include "absl/random/distributions.h"
+#include "absl/random/exponential_distribution.h"
+#include "absl/random/gaussian_distribution.h"
 #include "absl/random/internal/mock_overload_set.h"
+#include "absl/random/internal/mock_validators.h"
+#include "absl/random/log_uniform_int_distribution.h"
 #include "absl/random/mocking_bit_gen.h"
+#include "absl/random/poisson_distribution.h"
+#include "absl/random/zipf_distribution.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -80,8 +82,9 @@
 //  assert(x == 123456)
 //
 template <typename R>
-using MockUniform = random_internal::MockOverloadSet<
+using MockUniform = random_internal::MockOverloadSetWithValidator<
     random_internal::UniformDistributionWrapper<R>,
+    random_internal::UniformDistributionValidator<R>,
     R(IntervalClosedOpenTag, MockingBitGen&, R, R),
     R(IntervalClosedClosedTag, MockingBitGen&, R, R),
     R(IntervalOpenOpenTag, MockingBitGen&, R, R),
diff --git a/absl/random/mock_distributions_test.cc b/absl/random/mock_distributions_test.cc
index de23baf..05e313c 100644
--- a/absl/random/mock_distributions_test.cc
+++ b/absl/random/mock_distributions_test.cc
@@ -14,7 +14,13 @@
 
 #include "absl/random/mock_distributions.h"
 
+#include <cmath>
+#include <limits>
+
+#include "gmock/gmock.h"
 #include "gtest/gtest.h"
+#include "absl/numeric/int128.h"
+#include "absl/random/distributions.h"
 #include "absl/random/mocking_bit_gen.h"
 #include "absl/random/random.h"
 
@@ -69,4 +75,213 @@
   EXPECT_EQ(absl::LogUniform<int>(gen, 0, 1000000, 2), 2040);
 }
 
+TEST(MockUniform, OutOfBoundsIsAllowed) {
+  absl::UnvalidatedMockingBitGen gen;
+
+  EXPECT_CALL(absl::MockUniform<int>(), Call(gen, 1, 100)).WillOnce(Return(0));
+  EXPECT_EQ(absl::Uniform<int>(gen, 1, 100), 0);
+}
+
+TEST(ValidatedMockDistributions, UniformUInt128Works) {
+  absl::MockingBitGen gen;
+
+  EXPECT_CALL(absl::MockUniform<absl::uint128>(), Call(gen))
+      .WillOnce(Return(absl::Uint128Max()));
+  EXPECT_EQ(absl::Uniform<absl::uint128>(gen), absl::Uint128Max());
+}
+
+TEST(ValidatedMockDistributions, UniformDoubleBoundaryCases) {
+  absl::MockingBitGen gen;
+
+  EXPECT_CALL(absl::MockUniform<double>(), Call(gen, 1.0, 10.0))
+      .WillOnce(Return(
+          std::nextafter(10.0, -std::numeric_limits<double>::infinity())));
+  EXPECT_EQ(absl::Uniform<double>(gen, 1.0, 10.0),
+            std::nextafter(10.0, -std::numeric_limits<double>::infinity()));
+
+  EXPECT_CALL(absl::MockUniform<double>(),
+              Call(absl::IntervalOpen, gen, 1.0, 10.0))
+      .WillOnce(Return(
+          std::nextafter(10.0, -std::numeric_limits<double>::infinity())));
+  EXPECT_EQ(absl::Uniform<double>(absl::IntervalOpen, gen, 1.0, 10.0),
+            std::nextafter(10.0, -std::numeric_limits<double>::infinity()));
+
+  EXPECT_CALL(absl::MockUniform<double>(),
+              Call(absl::IntervalOpen, gen, 1.0, 10.0))
+      .WillOnce(
+          Return(std::nextafter(1.0, std::numeric_limits<double>::infinity())));
+  EXPECT_EQ(absl::Uniform<double>(absl::IntervalOpen, gen, 1.0, 10.0),
+            std::nextafter(1.0, std::numeric_limits<double>::infinity()));
+}
+
+TEST(ValidatedMockDistributions, UniformDoubleEmptyRangeCases) {
+  absl::MockingBitGen gen;
+
+  ON_CALL(absl::MockUniform<double>(), Call(absl::IntervalOpen, gen, 1.0, 1.0))
+      .WillByDefault(Return(1.0));
+  EXPECT_EQ(absl::Uniform<double>(absl::IntervalOpen, gen, 1.0, 1.0), 1.0);
+
+  ON_CALL(absl::MockUniform<double>(),
+          Call(absl::IntervalOpenClosed, gen, 1.0, 1.0))
+      .WillByDefault(Return(1.0));
+  EXPECT_EQ(absl::Uniform<double>(absl::IntervalOpenClosed, gen, 1.0, 1.0),
+            1.0);
+
+  ON_CALL(absl::MockUniform<double>(),
+          Call(absl::IntervalClosedOpen, gen, 1.0, 1.0))
+      .WillByDefault(Return(1.0));
+  EXPECT_EQ(absl::Uniform<double>(absl::IntervalClosedOpen, gen, 1.0, 1.0),
+            1.0);
+}
+
+TEST(ValidatedMockDistributions, UniformIntEmptyRangeCases) {
+  absl::MockingBitGen gen;
+
+  ON_CALL(absl::MockUniform<int>(), Call(absl::IntervalOpen, gen, 1, 1))
+      .WillByDefault(Return(1));
+  EXPECT_EQ(absl::Uniform<int>(absl::IntervalOpen, gen, 1, 1), 1);
+
+  ON_CALL(absl::MockUniform<int>(), Call(absl::IntervalOpenClosed, gen, 1, 1))
+      .WillByDefault(Return(1));
+  EXPECT_EQ(absl::Uniform<int>(absl::IntervalOpenClosed, gen, 1, 1), 1);
+
+  ON_CALL(absl::MockUniform<int>(), Call(absl::IntervalClosedOpen, gen, 1, 1))
+      .WillByDefault(Return(1));
+  EXPECT_EQ(absl::Uniform<int>(absl::IntervalClosedOpen, gen, 1, 1), 1);
+}
+
+TEST(ValidatedMockUniformDeathTest, Examples) {
+  absl::MockingBitGen gen;
+
+  EXPECT_DEATH_IF_SUPPORTED(
+      {
+        EXPECT_CALL(absl::MockUniform<int>(), Call(gen, 1, 100))
+            .WillOnce(Return(0));
+        absl::Uniform<int>(gen, 1, 100);
+      },
+      " 0 is not in \\[1, 100\\)");
+  EXPECT_DEATH_IF_SUPPORTED(
+      {
+        EXPECT_CALL(absl::MockUniform<int>(), Call(gen, 1, 100))
+            .WillOnce(Return(101));
+        absl::Uniform<int>(gen, 1, 100);
+      },
+      " 101 is not in \\[1, 100\\)");
+  EXPECT_DEATH_IF_SUPPORTED(
+      {
+        EXPECT_CALL(absl::MockUniform<int>(), Call(gen, 1, 100))
+            .WillOnce(Return(100));
+        absl::Uniform<int>(gen, 1, 100);
+      },
+      " 100 is not in \\[1, 100\\)");
+
+  EXPECT_DEATH_IF_SUPPORTED(
+      {
+        EXPECT_CALL(absl::MockUniform<int>(),
+                    Call(absl::IntervalOpen, gen, 1, 100))
+            .WillOnce(Return(1));
+        absl::Uniform<int>(absl::IntervalOpen, gen, 1, 100);
+      },
+      " 1 is not in \\(1, 100\\)");
+  EXPECT_DEATH_IF_SUPPORTED(
+      {
+        EXPECT_CALL(absl::MockUniform<int>(),
+                    Call(absl::IntervalOpen, gen, 1, 100))
+            .WillOnce(Return(101));
+        absl::Uniform<int>(absl::IntervalOpen, gen, 1, 100);
+      },
+      " 101 is not in \\(1, 100\\)");
+  EXPECT_DEATH_IF_SUPPORTED(
+      {
+        EXPECT_CALL(absl::MockUniform<int>(),
+                    Call(absl::IntervalOpen, gen, 1, 100))
+            .WillOnce(Return(100));
+        absl::Uniform<int>(absl::IntervalOpen, gen, 1, 100);
+      },
+      " 100 is not in \\(1, 100\\)");
+
+  EXPECT_DEATH_IF_SUPPORTED(
+      {
+        EXPECT_CALL(absl::MockUniform<int>(),
+                    Call(absl::IntervalOpenClosed, gen, 1, 100))
+            .WillOnce(Return(1));
+        absl::Uniform<int>(absl::IntervalOpenClosed, gen, 1, 100);
+      },
+      " 1 is not in \\(1, 100\\]");
+  EXPECT_DEATH_IF_SUPPORTED(
+      {
+        EXPECT_CALL(absl::MockUniform<int>(),
+                    Call(absl::IntervalOpenClosed, gen, 1, 100))
+            .WillOnce(Return(101));
+        absl::Uniform<int>(absl::IntervalOpenClosed, gen, 1, 100);
+      },
+      " 101 is not in \\(1, 100\\]");
+
+  EXPECT_DEATH_IF_SUPPORTED(
+      {
+        EXPECT_CALL(absl::MockUniform<int>(),
+                    Call(absl::IntervalOpenClosed, gen, 1, 100))
+            .WillOnce(Return(0));
+        absl::Uniform<int>(absl::IntervalOpenClosed, gen, 1, 100);
+      },
+      " 0 is not in \\(1, 100\\]");
+  EXPECT_DEATH_IF_SUPPORTED(
+      {
+        EXPECT_CALL(absl::MockUniform<int>(),
+                    Call(absl::IntervalOpenClosed, gen, 1, 100))
+            .WillOnce(Return(101));
+        absl::Uniform<int>(absl::IntervalOpenClosed, gen, 1, 100);
+      },
+      " 101 is not in \\(1, 100\\]");
+
+  EXPECT_DEATH_IF_SUPPORTED(
+      {
+        EXPECT_CALL(absl::MockUniform<int>(),
+                    Call(absl::IntervalClosed, gen, 1, 100))
+            .WillOnce(Return(0));
+        absl::Uniform<int>(absl::IntervalClosed, gen, 1, 100);
+      },
+      " 0 is not in \\[1, 100\\]");
+  EXPECT_DEATH_IF_SUPPORTED(
+      {
+        EXPECT_CALL(absl::MockUniform<int>(),
+                    Call(absl::IntervalClosed, gen, 1, 100))
+            .WillOnce(Return(101));
+        absl::Uniform<int>(absl::IntervalClosed, gen, 1, 100);
+      },
+      " 101 is not in \\[1, 100\\]");
+}
+
+TEST(ValidatedMockUniformDeathTest, DoubleBoundaryCases) {
+  absl::MockingBitGen gen;
+
+  EXPECT_DEATH_IF_SUPPORTED(
+      {
+        EXPECT_CALL(absl::MockUniform<double>(), Call(gen, 1.0, 10.0))
+            .WillOnce(Return(10.0));
+        EXPECT_EQ(absl::Uniform<double>(gen, 1.0, 10.0), 10.0);
+      },
+      " 10 is not in \\[1, 10\\)");
+
+  EXPECT_DEATH_IF_SUPPORTED(
+      {
+        EXPECT_CALL(absl::MockUniform<double>(),
+                    Call(absl::IntervalOpen, gen, 1.0, 10.0))
+            .WillOnce(Return(10.0));
+        EXPECT_EQ(absl::Uniform<double>(absl::IntervalOpen, gen, 1.0, 10.0),
+                  10.0);
+      },
+      " 10 is not in \\(1, 10\\)");
+
+  EXPECT_DEATH_IF_SUPPORTED(
+      {
+        EXPECT_CALL(absl::MockUniform<double>(),
+                    Call(absl::IntervalOpen, gen, 1.0, 10.0))
+            .WillOnce(Return(1.0));
+        EXPECT_EQ(absl::Uniform<double>(absl::IntervalOpen, gen, 1.0, 10.0),
+                  1.0);
+      },
+      " 1 is not in \\(1, 10\\)");
+}
+
 }  // namespace
diff --git a/absl/random/mocking_bit_gen.h b/absl/random/mocking_bit_gen.h
index 89fa5a4..041989d 100644
--- a/absl/random/mocking_bit_gen.h
+++ b/absl/random/mocking_bit_gen.h
@@ -28,37 +28,175 @@
 #ifndef ABSL_RANDOM_MOCKING_BIT_GEN_H_
 #define ABSL_RANDOM_MOCKING_BIT_GEN_H_
 
-#include <iterator>
-#include <limits>
 #include <memory>
 #include <tuple>
 #include <type_traits>
 #include <utility>
 
 #include "gmock/gmock.h"
-#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
 #include "absl/base/internal/fast_type_id.h"
 #include "absl/container/flat_hash_map.h"
 #include "absl/meta/type_traits.h"
-#include "absl/random/distributions.h"
-#include "absl/random/internal/distribution_caller.h"
+#include "absl/random/internal/mock_helpers.h"
 #include "absl/random/random.h"
-#include "absl/strings/str_cat.h"
-#include "absl/strings/str_join.h"
-#include "absl/types/span.h"
-#include "absl/types/variant.h"
 #include "absl/utility/utility.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 
+class BitGenRef;
+
 namespace random_internal {
 template <typename>
 struct DistributionCaller;
 class MockHelpers;
 
+// Implements MockingBitGen with an option to turn on extra validation.
+template <bool EnableValidation>
+class MockingBitGenImpl {
+ public:
+  MockingBitGenImpl() = default;
+  ~MockingBitGenImpl() = default;
+
+  // URBG interface
+  using result_type = absl::BitGen::result_type;
+
+  static constexpr result_type(min)() { return (absl::BitGen::min)(); }
+  static constexpr result_type(max)() { return (absl::BitGen::max)(); }
+  result_type operator()() { return gen_(); }
+
+ private:
+  // GetMockFnType returns the testing::MockFunction for a result and tuple.
+  // This method only exists for type deduction and is otherwise unimplemented.
+  template <typename ResultT, typename... Args>
+  static auto GetMockFnType(ResultT, std::tuple<Args...>)
+      -> ::testing::MockFunction<ResultT(Args...)>;
+
+  // MockFnCaller is a helper struct for use with absl::apply to
+  // apply an ArgTupleT to a compatible MockFunction.
+  // NOTE: MockFnCaller is essentially equivalent to the lambda:
+  // [fn](auto... args) { return fn->Call(std::move(args)...)}
+  // however that fails to build on some supported platforms.
+  template <typename MockFnType, typename ValidatorT, typename ResultT,
+            typename Tuple>
+  struct MockFnCaller;
+
+  // specialization for std::tuple.
+  template <typename MockFnType, typename ValidatorT, typename ResultT,
+            typename... Args>
+  struct MockFnCaller<MockFnType, ValidatorT, ResultT, std::tuple<Args...>> {
+    MockFnType* fn;
+    inline ResultT operator()(Args... args) {
+      ResultT result = fn->Call(args...);
+      ValidatorT::Validate(result, args...);
+      return result;
+    }
+  };
+
+  // FunctionHolder owns a particular ::testing::MockFunction associated with
+  // a mocked type signature, and implements the type-erased Apply call, which
+  // applies type-erased arguments to the mock.
+  class FunctionHolder {
+   public:
+    virtual ~FunctionHolder() = default;
+
+    // Apply is a dispatch function which converts the
+    // generic type-erased parameters into a specific mock invocation call.
+    virtual void Apply(/*ArgTupleT*/ void* args_tuple,
+                       /*ResultT*/ void* result) = 0;
+  };
+
+  template <typename MockFnType, typename ValidatorT, typename ResultT,
+            typename ArgTupleT>
+  class FunctionHolderImpl final : public FunctionHolder {
+   public:
+    void Apply(void* args_tuple, void* result) final {
+      // Requires tuple_args to point to an ArgTupleT, which is a
+      // std::tuple<Args...> used to invoke the mock function. Requires result
+      // to point to a ResultT, which is the result of the call.
+      *static_cast<ResultT*>(result) = absl::apply(
+          MockFnCaller<MockFnType, ValidatorT, ResultT, ArgTupleT>{&mock_fn_},
+          *static_cast<ArgTupleT*>(args_tuple));
+    }
+
+    MockFnType mock_fn_;
+  };
+
+  // MockingBitGen::RegisterMock
+  //
+  // RegisterMock<ResultT, ArgTupleT>(FastTypeIdType) is the main extension
+  // point for extending the MockingBitGen framework. It provides a mechanism to
+  // install a mock expectation for a function like ResultT(Args...) keyed by
+  // type_index onto the MockingBitGen context. The key is that the type_index
+  // used to register must match the type_index used to call the mock.
+  //
+  // The returned MockFunction<...> type can be used to set up additional
+  // distribution parameters of the expectation.
+  template <typename ResultT, typename ArgTupleT, typename SelfT,
+            typename ValidatorT>
+  auto RegisterMock(SelfT&, base_internal::FastTypeIdType type, ValidatorT)
+      -> decltype(GetMockFnType(std::declval<ResultT>(),
+                                std::declval<ArgTupleT>()))& {
+    using ActualValidatorT =
+        std::conditional_t<EnableValidation, ValidatorT, NoOpValidator>;
+    using MockFnType = decltype(GetMockFnType(std::declval<ResultT>(),
+                                              std::declval<ArgTupleT>()));
+
+    using WrappedFnType = absl::conditional_t<
+        std::is_same<SelfT, ::testing::NiceMock<MockingBitGenImpl>>::value,
+        ::testing::NiceMock<MockFnType>,
+        absl::conditional_t<
+            std::is_same<SelfT, ::testing::NaggyMock<MockingBitGenImpl>>::value,
+            ::testing::NaggyMock<MockFnType>,
+            absl::conditional_t<
+                std::is_same<SelfT,
+                             ::testing::StrictMock<MockingBitGenImpl>>::value,
+                ::testing::StrictMock<MockFnType>, MockFnType>>>;
+
+    using ImplT =
+        FunctionHolderImpl<WrappedFnType, ActualValidatorT, ResultT, ArgTupleT>;
+    auto& mock = mocks_[type];
+    if (!mock) {
+      mock = absl::make_unique<ImplT>();
+    }
+    return static_cast<ImplT*>(mock.get())->mock_fn_;
+  }
+
+  // MockingBitGen::InvokeMock
+  //
+  // InvokeMock(FastTypeIdType, args, result) is the entrypoint for invoking
+  // mocks registered on MockingBitGen.
+  //
+  // When no mocks are registered on the provided FastTypeIdType, returns false.
+  // Otherwise attempts to invoke the mock function ResultT(Args...) that
+  // was previously registered via the type_index.
+  // Requires tuple_args to point to an ArgTupleT, which is a std::tuple<Args...>
+  // used to invoke the mock function.
+  // Requires result to point to a ResultT, which is the result of the call.
+  inline bool InvokeMock(base_internal::FastTypeIdType type, void* args_tuple,
+                         void* result) {
+    // Trigger a mock, if there exists one that matches `param`.
+    auto it = mocks_.find(type);
+    if (it == mocks_.end()) return false;
+    it->second->Apply(args_tuple, result);
+    return true;
+  }
+
+  absl::flat_hash_map<base_internal::FastTypeIdType,
+                      std::unique_ptr<FunctionHolder>>
+      mocks_;
+  absl::BitGen gen_;
+
+  template <typename>
+  friend struct ::absl::random_internal::DistributionCaller;  // for InvokeMock
+  friend class ::absl::BitGenRef;                             // for InvokeMock
+  friend class ::absl::random_internal::MockHelpers;  // for RegisterMock,
+                                                      // InvokeMock
+};
+
 }  // namespace random_internal
-class BitGenRef;
 
 // MockingBitGen
 //
@@ -101,138 +239,14 @@
 // since the  underlying implementation creates a type-specific pointer which
 // will be distinct across different DLL boundaries.
 //
-class MockingBitGen {
- public:
-  MockingBitGen() = default;
-  ~MockingBitGen() = default;
+using MockingBitGen = random_internal::MockingBitGenImpl<true>;
 
-  // URBG interface
-  using result_type = absl::BitGen::result_type;
-
-  static constexpr result_type(min)() { return (absl::BitGen::min)(); }
-  static constexpr result_type(max)() { return (absl::BitGen::max)(); }
-  result_type operator()() { return gen_(); }
-
- private:
-  // GetMockFnType returns the testing::MockFunction for a result and tuple.
-  // This method only exists for type deduction and is otherwise unimplemented.
-  template <typename ResultT, typename... Args>
-  static auto GetMockFnType(ResultT, std::tuple<Args...>)
-      -> ::testing::MockFunction<ResultT(Args...)>;
-
-  // MockFnCaller is a helper method for use with absl::apply to
-  // apply an ArgTupleT to a compatible MockFunction.
-  // NOTE: MockFnCaller is essentially equivalent to the lambda:
-  // [fn](auto... args) { return fn->Call(std::move(args)...)}
-  // however that fails to build on some supported platforms.
-  template <typename MockFnType, typename ResultT, typename Tuple>
-  struct MockFnCaller;
-
-  // specialization for std::tuple.
-  template <typename MockFnType, typename ResultT, typename... Args>
-  struct MockFnCaller<MockFnType, ResultT, std::tuple<Args...>> {
-    MockFnType* fn;
-    inline ResultT operator()(Args... args) {
-      return fn->Call(std::move(args)...);
-    }
-  };
-
-  // FunctionHolder owns a particular ::testing::MockFunction associated with
-  // a mocked type signature, and implement the type-erased Apply call, which
-  // applies type-erased arguments to the mock.
-  class FunctionHolder {
-   public:
-    virtual ~FunctionHolder() = default;
-
-    // Call is a dispatch function which converts the
-    // generic type-erased parameters into a specific mock invocation call.
-    virtual void Apply(/*ArgTupleT*/ void* args_tuple,
-                       /*ResultT*/ void* result) = 0;
-  };
-
-  template <typename MockFnType, typename ResultT, typename ArgTupleT>
-  class FunctionHolderImpl final : public FunctionHolder {
-   public:
-    void Apply(void* args_tuple, void* result) override {
-      // Requires tuple_args to point to a ArgTupleT, which is a
-      // std::tuple<Args...> used to invoke the mock function. Requires result
-      // to point to a ResultT, which is the result of the call.
-      *static_cast<ResultT*>(result) =
-          absl::apply(MockFnCaller<MockFnType, ResultT, ArgTupleT>{&mock_fn_},
-                      *static_cast<ArgTupleT*>(args_tuple));
-    }
-
-    MockFnType mock_fn_;
-  };
-
-  // MockingBitGen::RegisterMock
-  //
-  // RegisterMock<ResultT, ArgTupleT>(FastTypeIdType) is the main extension
-  // point for extending the MockingBitGen framework. It provides a mechanism to
-  // install a mock expectation for a function like ResultT(Args...) keyed by
-  // type_idex onto the MockingBitGen context. The key is that the type_index
-  // used to register must match the type index used to call the mock.
-  //
-  // The returned MockFunction<...> type can be used to setup additional
-  // distribution parameters of the expectation.
-  template <typename ResultT, typename ArgTupleT, typename SelfT>
-  auto RegisterMock(SelfT&, base_internal::FastTypeIdType type)
-      -> decltype(GetMockFnType(std::declval<ResultT>(),
-                                std::declval<ArgTupleT>()))& {
-    using MockFnType = decltype(GetMockFnType(std::declval<ResultT>(),
-                                              std::declval<ArgTupleT>()));
-
-    using WrappedFnType = absl::conditional_t<
-        std::is_same<SelfT, ::testing::NiceMock<absl::MockingBitGen>>::value,
-        ::testing::NiceMock<MockFnType>,
-        absl::conditional_t<
-            std::is_same<SelfT,
-                         ::testing::NaggyMock<absl::MockingBitGen>>::value,
-            ::testing::NaggyMock<MockFnType>,
-            absl::conditional_t<
-                std::is_same<SelfT,
-                             ::testing::StrictMock<absl::MockingBitGen>>::value,
-                ::testing::StrictMock<MockFnType>, MockFnType>>>;
-
-    using ImplT = FunctionHolderImpl<WrappedFnType, ResultT, ArgTupleT>;
-    auto& mock = mocks_[type];
-    if (!mock) {
-      mock = absl::make_unique<ImplT>();
-    }
-    return static_cast<ImplT*>(mock.get())->mock_fn_;
-  }
-
-  // MockingBitGen::InvokeMock
-  //
-  // InvokeMock(FastTypeIdType, args, result) is the entrypoint for invoking
-  // mocks registered on MockingBitGen.
-  //
-  // When no mocks are registered on the provided FastTypeIdType, returns false.
-  // Otherwise attempts to invoke the mock function ResultT(Args...) that
-  // was previously registered via the type_index.
-  // Requires tuple_args to point to a ArgTupleT, which is a std::tuple<Args...>
-  // used to invoke the mock function.
-  // Requires result to point to a ResultT, which is the result of the call.
-  inline bool InvokeMock(base_internal::FastTypeIdType type, void* args_tuple,
-                         void* result) {
-    // Trigger a mock, if there exists one that matches `param`.
-    auto it = mocks_.find(type);
-    if (it == mocks_.end()) return false;
-    it->second->Apply(args_tuple, result);
-    return true;
-  }
-
-  absl::flat_hash_map<base_internal::FastTypeIdType,
-                      std::unique_ptr<FunctionHolder>>
-      mocks_;
-  absl::BitGen gen_;
-
-  template <typename>
-  friend struct ::absl::random_internal::DistributionCaller;  // for InvokeMock
-  friend class ::absl::BitGenRef;                             // for InvokeMock
-  friend class ::absl::random_internal::MockHelpers;  // for RegisterMock,
-                                                      // InvokeMock
-};
+// UnvalidatedMockingBitGen
+//
+// UnvalidatedMockingBitGen is a variant of MockingBitGen which does no extra
+// validation.
+using UnvalidatedMockingBitGen ABSL_DEPRECATED("Use MockingBitGen instead") =
+    random_internal::MockingBitGenImpl<false>;
 
 ABSL_NAMESPACE_END
 }  // namespace absl
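
MockingBitGenImpl keeps the pre-existing type-erasure scheme (a FastTypeIdType-keyed map of FunctionHolder pointers) and only threads ValidatorT through MockFnCaller, so validation runs right after the mocked function returns. A stripped-down sketch of that call-then-validate ordering, with illustrative names and no gmock involvement:

#include <cassert>
#include <functional>

// Calls the wrapped function, then validates the result against the
// arguments -- the same ordering used by MockFnCaller::operator() above.
template <typename Validator, typename R, typename... Args>
R CallAndValidate(const std::function<R(Args...)>& fn, Args... args) {
  R result = fn(args...);
  Validator::Validate(result, args...);
  return result;
}

struct InRangeValidator {
  static void Validate(int result, int lo, int hi) {
    assert(lo <= result && result < hi);  // closed-open, as for Uniform(lo, hi)
  }
};

int main() {
  std::function<int(int, int)> mocked = [](int lo, int /*hi*/) { return lo; };
  return CallAndValidate<InRangeValidator>(mocked, 1, 100) - 1;  // returns 0
}
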
diff --git a/absl/random/mocking_bit_gen_test.cc b/absl/random/mocking_bit_gen_test.cc
index c713cea..26e673a 100644
--- a/absl/random/mocking_bit_gen_test.cc
+++ b/absl/random/mocking_bit_gen_test.cc
@@ -16,8 +16,11 @@
 #include "absl/random/mocking_bit_gen.h"
 
 #include <cmath>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
 #include <numeric>
-#include <random>
+#include <vector>
 
 #include "gmock/gmock.h"
 #include "gtest/gtest-spi.h"
@@ -176,12 +179,18 @@
   EXPECT_NE(get_value(mocked_with_11), 11);
 }
 
-TEST(BasicMocking, MocksNotTrigeredForIncorrectTypes) {
+TEST(BasicMocking, MocksNotTriggeredForIncorrectTypes) {
   absl::MockingBitGen gen;
-  EXPECT_CALL(absl::MockUniform<uint32_t>(), Call(gen)).WillOnce(Return(42));
+  EXPECT_CALL(absl::MockUniform<uint32_t>(), Call(gen))
+      .WillRepeatedly(Return(42));
 
-  EXPECT_NE(absl::Uniform<uint16_t>(gen), 42);  // Not mocked
-  EXPECT_EQ(absl::Uniform<uint32_t>(gen), 42);  // Mock triggered
+  bool uint16_always42 = true;
+  for (int i = 0; i < 10000; i++) {
+    EXPECT_EQ(absl::Uniform<uint32_t>(gen), 42);  // Mock triggered.
+    // uint16_t not mocked.
+    uint16_always42 = uint16_always42 && absl::Uniform<uint16_t>(gen) == 42;
+  }
+  EXPECT_FALSE(uint16_always42);
 }
 
 TEST(BasicMocking, FailsOnUnsatisfiedMocks) {
@@ -239,33 +248,33 @@
   absl::MockingBitGen gen;
   EXPECT_CALL(absl::MockUniform<int>(), Call(gen, 1, 1000000))
       .Times(3)
-      .WillRepeatedly(Return(0));
+      .WillRepeatedly(Return(1));
   EXPECT_CALL(absl::MockUniform<int>(), Call(gen, 1000001, 2000000))
       .Times(3)
-      .WillRepeatedly(Return(1));
-  EXPECT_EQ(absl::Uniform(gen, 1000001, 2000000), 1);
-  EXPECT_EQ(absl::Uniform(gen, 1, 1000000), 0);
-  EXPECT_EQ(absl::Uniform(gen, 1000001, 2000000), 1);
-  EXPECT_EQ(absl::Uniform(gen, 1, 1000000), 0);
-  EXPECT_EQ(absl::Uniform(gen, 1000001, 2000000), 1);
-  EXPECT_EQ(absl::Uniform(gen, 1, 1000000), 0);
+      .WillRepeatedly(Return(1000001));
+  EXPECT_EQ(absl::Uniform(gen, 1000001, 2000000), 1000001);
+  EXPECT_EQ(absl::Uniform(gen, 1, 1000000), 1);
+  EXPECT_EQ(absl::Uniform(gen, 1000001, 2000000), 1000001);
+  EXPECT_EQ(absl::Uniform(gen, 1, 1000000), 1);
+  EXPECT_EQ(absl::Uniform(gen, 1000001, 2000000), 1000001);
+  EXPECT_EQ(absl::Uniform(gen, 1, 1000000), 1);
 }
 
 TEST(TimesModifier, ModifierSaturatesAndExpires) {
   EXPECT_NONFATAL_FAILURE(
       []() {
         absl::MockingBitGen gen;
-        EXPECT_CALL(absl::MockUniform<int>(), Call(gen, 1, 1000000))
+        EXPECT_CALL(absl::MockUniform<int>(), Call(gen, 0, 1000000))
             .Times(3)
             .WillRepeatedly(Return(15))
             .RetiresOnSaturation();
 
-        EXPECT_EQ(absl::Uniform(gen, 1, 1000000), 15);
-        EXPECT_EQ(absl::Uniform(gen, 1, 1000000), 15);
-        EXPECT_EQ(absl::Uniform(gen, 1, 1000000), 15);
+        EXPECT_EQ(absl::Uniform(gen, 0, 1000000), 15);
+        EXPECT_EQ(absl::Uniform(gen, 0, 1000000), 15);
+        EXPECT_EQ(absl::Uniform(gen, 0, 1000000), 15);
         // Times(3) has expired - Should get a different value now.
 
-        EXPECT_NE(absl::Uniform(gen, 1, 1000000), 15);
+        EXPECT_NE(absl::Uniform(gen, 0, 1000000), 15);
       }(),
       "");
 }
@@ -387,7 +396,7 @@
   EXPECT_EQ(absl::Uniform(gen, 1, 1000), 145);
 
   EXPECT_NONFATAL_FAILURE(
-      [&]() { EXPECT_EQ(absl::Uniform(gen, 10, 1000), 0); }(),
+      [&]() { EXPECT_EQ(absl::Uniform(gen, 0, 1000), 0); }(),
       "over-saturated and active");
 }
 
diff --git a/absl/random/seed_sequences.h b/absl/random/seed_sequences.h
index c3af4b0..33970be 100644
--- a/absl/random/seed_sequences.h
+++ b/absl/random/seed_sequences.h
@@ -29,9 +29,11 @@
 #include <random>
 
 #include "absl/base/config.h"
+#include "absl/base/nullability.h"
 #include "absl/random/internal/salted_seed_seq.h"
 #include "absl/random/internal/seed_material.h"
 #include "absl/random/seed_gen_exception.h"
+#include "absl/strings/string_view.h"
 #include "absl/types/span.h"
 
 namespace absl {
diff --git a/absl/status/BUILD.bazel b/absl/status/BUILD.bazel
index 981b37f..8822e0f 100644
--- a/absl/status/BUILD.bazel
+++ b/absl/status/BUILD.bazel
@@ -118,6 +118,7 @@
     srcs = ["statusor_test.cc"],
     deps = [
         ":status",
+        ":status_matchers",
         ":statusor",
         "//absl/base",
         "//absl/memory",
@@ -129,3 +130,38 @@
         "@com_google_googletest//:gtest_main",
     ],
 )
+
+cc_library(
+    name = "status_matchers",
+    testonly = 1,
+    srcs = [
+        "internal/status_matchers.cc",
+        "internal/status_matchers.h",
+    ],
+    hdrs = ["status_matchers.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":status",
+        ":statusor",
+        "//absl/base:config",
+        "//absl/strings:string_view",
+        "@com_google_googletest//:gtest",
+    ],
+)
+
+cc_test(
+    name = "status_matchers_test",
+    size = "small",
+    srcs = ["status_matchers_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":status",
+        ":status_matchers",
+        ":statusor",
+        "//absl/strings",
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
diff --git a/absl/status/CMakeLists.txt b/absl/status/CMakeLists.txt
index 00415ab..24c01e7 100644
--- a/absl/status/CMakeLists.txt
+++ b/absl/status/CMakeLists.txt
@@ -98,7 +98,45 @@
     ${ABSL_TEST_COPTS}
   DEPS
     absl::status
+    absl::status_matchers
     absl::statusor
     absl::strings
     GTest::gmock_main
 )
+
+absl_cc_library(
+  NAME
+    status_matchers
+  HDRS
+    "status_matchers.h"
+  SRCS
+    "internal/status_matchers.h"
+    "internal/status_matchers.cc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::base
+    absl::status
+    absl::statusor
+    absl::strings
+    GTest::gmock
+    GTest::gtest
+  PUBLIC
+  TESTONLY
+)
+
+absl_cc_test(
+  NAME
+    status_matchers_test
+  SRCS
+   "status_matchers_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::status
+    absl::statusor
+    absl::status_matchers
+    GTest::gmock_main
+)
diff --git a/absl/status/internal/status_matchers.cc b/absl/status/internal/status_matchers.cc
new file mode 100644
index 0000000..908b70b
--- /dev/null
+++ b/absl/status/internal/status_matchers.cc
@@ -0,0 +1,68 @@
+// Copyright 2024 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: status_matchers.cc
+// -----------------------------------------------------------------------------
+
+#include "absl/status/internal/status_matchers.h"
+
+#include <ostream>
+#include <string>
+
+#include "gmock/gmock.h"  // gmock_for_status_matchers.h
+#include "absl/base/config.h"
+#include "absl/status/status.h"
+
+namespace absl_testing {
+ABSL_NAMESPACE_BEGIN
+namespace status_internal {
+
+void StatusIsMatcherCommonImpl::DescribeTo(std::ostream* os) const {
+  *os << ", has a status code that ";
+  code_matcher_.DescribeTo(os);
+  *os << ", and has an error message that ";
+  message_matcher_.DescribeTo(os);
+}
+
+void StatusIsMatcherCommonImpl::DescribeNegationTo(std::ostream* os) const {
+  *os << ", or has a status code that ";
+  code_matcher_.DescribeNegationTo(os);
+  *os << ", or has an error message that ";
+  message_matcher_.DescribeNegationTo(os);
+}
+
+bool StatusIsMatcherCommonImpl::MatchAndExplain(
+    const ::absl::Status& status,
+    ::testing::MatchResultListener* result_listener) const {
+  ::testing::StringMatchResultListener inner_listener;
+  if (!code_matcher_.MatchAndExplain(status.code(), &inner_listener)) {
+    *result_listener << (inner_listener.str().empty()
+                             ? "whose status code is wrong"
+                             : "which has a status code " +
+                                   inner_listener.str());
+    return false;
+  }
+
+  if (!message_matcher_.Matches(std::string(status.message()))) {
+    *result_listener << "whose error message is wrong";
+    return false;
+  }
+
+  return true;
+}
+
+}  // namespace status_internal
+ABSL_NAMESPACE_END
+}  // namespace absl_testing
diff --git a/absl/status/internal/status_matchers.h b/absl/status/internal/status_matchers.h
new file mode 100644
index 0000000..0750622
--- /dev/null
+++ b/absl/status/internal/status_matchers.h
@@ -0,0 +1,246 @@
+// Copyright 2024 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STATUS_INTERNAL_STATUS_MATCHERS_H_
+#define ABSL_STATUS_INTERNAL_STATUS_MATCHERS_H_
+
+#include <ostream>  // NOLINT
+#include <string>
+#include <type_traits>
+#include <utility>
+
+#include "gmock/gmock.h"  // gmock_for_status_matchers.h
+#include "absl/base/config.h"
+#include "absl/status/status.h"
+#include "absl/status/statusor.h"
+#include "absl/strings/string_view.h"
+
+namespace absl_testing {
+ABSL_NAMESPACE_BEGIN
+namespace status_internal {
+
+inline const absl::Status& GetStatus(const absl::Status& status) {
+  return status;
+}
+
+template <typename T>
+inline const absl::Status& GetStatus(const absl::StatusOr<T>& status) {
+  return status.status();
+}
+
+////////////////////////////////////////////////////////////
+// Implementation of IsOkAndHolds().
+
+// Monomorphic implementation of matcher IsOkAndHolds(m).  StatusOrType is a
+// reference to StatusOr<T>.
+template <typename StatusOrType>
+class IsOkAndHoldsMatcherImpl
+    : public ::testing::MatcherInterface<StatusOrType> {
+ public:
+  typedef
+      typename std::remove_reference<StatusOrType>::type::value_type value_type;
+
+  template <typename InnerMatcher>
+  explicit IsOkAndHoldsMatcherImpl(InnerMatcher&& inner_matcher)
+      : inner_matcher_(::testing::SafeMatcherCast<const value_type&>(
+            std::forward<InnerMatcher>(inner_matcher))) {}
+
+  void DescribeTo(std::ostream* os) const override {
+    *os << "is OK and has a value that ";
+    inner_matcher_.DescribeTo(os);
+  }
+
+  void DescribeNegationTo(std::ostream* os) const override {
+    *os << "isn't OK or has a value that ";
+    inner_matcher_.DescribeNegationTo(os);
+  }
+
+  bool MatchAndExplain(
+      StatusOrType actual_value,
+      ::testing::MatchResultListener* result_listener) const override {
+    if (!GetStatus(actual_value).ok()) {
+      *result_listener << "which has status " << GetStatus(actual_value);
+      return false;
+    }
+
+    // Call through to the inner matcher.
+    return inner_matcher_.MatchAndExplain(*actual_value, result_listener);
+  }
+
+ private:
+  const ::testing::Matcher<const value_type&> inner_matcher_;
+};
+
+// Implements IsOkAndHolds(m) as a polymorphic matcher.
+template <typename InnerMatcher>
+class IsOkAndHoldsMatcher {
+ public:
+  explicit IsOkAndHoldsMatcher(InnerMatcher inner_matcher)
+      : inner_matcher_(std::forward<InnerMatcher>(inner_matcher)) {}
+
+  // Converts this polymorphic matcher to a monomorphic matcher of the
+  // given type.  StatusOrType can be either StatusOr<T> or a
+  // reference to StatusOr<T>.
+  template <typename StatusOrType>
+  operator ::testing::Matcher<StatusOrType>() const {  // NOLINT
+    return ::testing::Matcher<StatusOrType>(
+        new IsOkAndHoldsMatcherImpl<const StatusOrType&>(inner_matcher_));
+  }
+
+ private:
+  const InnerMatcher inner_matcher_;
+};
+
+////////////////////////////////////////////////////////////
+// Implementation of StatusIs().
+
+// `StatusCode` is implicitly convertible from both `int` and
+// `absl::StatusCode`, and is explicitly convertible to `int` as well.
+//
+// We need this class because `absl::StatusCode` (as a scoped enum) is not
+// implicitly convertible to `int`. In order to handle use cases like
+// ```
+// StatusIs(AnyOf(absl::StatusCode::kUnknown, absl::StatusCode::kCancelled))
+// ```
+// which uses polymorphic matchers, we need to unify the interfaces into
+// `Matcher<StatusCode>`.
+class StatusCode {
+ public:
+  /*implicit*/ StatusCode(int code)  // NOLINT
+      : code_(static_cast<::absl::StatusCode>(code)) {}
+  /*implicit*/ StatusCode(::absl::StatusCode code) : code_(code) {}  // NOLINT
+
+  explicit operator int() const { return static_cast<int>(code_); }
+
+  friend inline void PrintTo(const StatusCode& code, std::ostream* os) {
+    // TODO(b/321095377): Change this to print the status code as a string.
+    *os << static_cast<int>(code);
+  }
+
+ private:
+  ::absl::StatusCode code_;
+};
+
+// Relational operators to handle matchers like Eq, Lt, etc.
+inline bool operator==(const StatusCode& lhs, const StatusCode& rhs) {
+  return static_cast<int>(lhs) == static_cast<int>(rhs);
+}
+inline bool operator!=(const StatusCode& lhs, const StatusCode& rhs) {
+  return static_cast<int>(lhs) != static_cast<int>(rhs);
+}
+
+// StatusIs() is a polymorphic matcher.  This class is the common
+// implementation of it shared by all types T where StatusIs() can be
+// used as a Matcher<T>.
+class StatusIsMatcherCommonImpl {
+ public:
+  StatusIsMatcherCommonImpl(
+      ::testing::Matcher<StatusCode> code_matcher,
+      ::testing::Matcher<absl::string_view> message_matcher)
+      : code_matcher_(std::move(code_matcher)),
+        message_matcher_(std::move(message_matcher)) {}
+
+  void DescribeTo(std::ostream* os) const;
+
+  void DescribeNegationTo(std::ostream* os) const;
+
+  bool MatchAndExplain(const absl::Status& status,
+                       ::testing::MatchResultListener* result_listener) const;
+
+ private:
+  const ::testing::Matcher<StatusCode> code_matcher_;
+  const ::testing::Matcher<absl::string_view> message_matcher_;
+};
+
+// Monomorphic implementation of matcher StatusIs() for a given type
+// T.  T can be Status, StatusOr<>, or a reference to either of them.
+template <typename T>
+class MonoStatusIsMatcherImpl : public ::testing::MatcherInterface<T> {
+ public:
+  explicit MonoStatusIsMatcherImpl(StatusIsMatcherCommonImpl common_impl)
+      : common_impl_(std::move(common_impl)) {}
+
+  void DescribeTo(std::ostream* os) const override {
+    common_impl_.DescribeTo(os);
+  }
+
+  void DescribeNegationTo(std::ostream* os) const override {
+    common_impl_.DescribeNegationTo(os);
+  }
+
+  bool MatchAndExplain(
+      T actual_value,
+      ::testing::MatchResultListener* result_listener) const override {
+    return common_impl_.MatchAndExplain(GetStatus(actual_value),
+                                        result_listener);
+  }
+
+ private:
+  StatusIsMatcherCommonImpl common_impl_;
+};
+
+// Implements StatusIs() as a polymorphic matcher.
+class StatusIsMatcher {
+ public:
+  template <typename StatusCodeMatcher, typename StatusMessageMatcher>
+  StatusIsMatcher(StatusCodeMatcher&& code_matcher,
+                  StatusMessageMatcher&& message_matcher)
+      : common_impl_(::testing::MatcherCast<StatusCode>(
+                         std::forward<StatusCodeMatcher>(code_matcher)),
+                     ::testing::MatcherCast<absl::string_view>(
+                         std::forward<StatusMessageMatcher>(message_matcher))) {
+  }
+
+  // Converts this polymorphic matcher to a monomorphic matcher of the
+  // given type.  T can be StatusOr<>, Status, or a reference to
+  // either of them.
+  template <typename T>
+  /*implicit*/ operator ::testing::Matcher<T>() const {  // NOLINT
+    return ::testing::Matcher<T>(
+        new MonoStatusIsMatcherImpl<const T&>(common_impl_));
+  }
+
+ private:
+  const StatusIsMatcherCommonImpl common_impl_;
+};
+
+// Monomorphic implementation of matcher IsOk() for a given type T.
+// T can be Status, StatusOr<>, or a reference to either of them.
+template <typename T>
+class MonoIsOkMatcherImpl : public ::testing::MatcherInterface<T> {
+ public:
+  void DescribeTo(std::ostream* os) const override { *os << "is OK"; }
+  void DescribeNegationTo(std::ostream* os) const override {
+    *os << "is not OK";
+  }
+  bool MatchAndExplain(T actual_value,
+                       ::testing::MatchResultListener*) const override {
+    return GetStatus(actual_value).ok();
+  }
+};
+
+// Implements IsOk() as a polymorphic matcher.
+class IsOkMatcher {
+ public:
+  template <typename T>
+  /*implicit*/ operator ::testing::Matcher<T>() const {  // NOLINT
+    return ::testing::Matcher<T>(new MonoIsOkMatcherImpl<const T&>());
+  }
+};
+
+}  // namespace status_internal
+ABSL_NAMESPACE_END
+}  // namespace absl_testing
+
+#endif  // ABSL_STATUS_INTERNAL_STATUS_MATCHERS_H_
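
Assuming the public absl/status/status_matchers.h introduced by this change exposes absl_testing::IsOk(), IsOkAndHolds(), and StatusIs() wrappers over the internal matcher classes above, a typical test would read roughly as follows (a sketch, not code from this patch):

#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"

namespace {

absl::StatusOr<int> ParsePositive(int x) {
  if (x <= 0) return absl::InvalidArgumentError("not positive");
  return x;
}

TEST(StatusMatchersSketch, Basic) {
  // Matches any OK Status or StatusOr.
  EXPECT_THAT(ParsePositive(3), absl_testing::IsOk());
  // Matches an OK StatusOr whose value satisfies the inner matcher.
  EXPECT_THAT(ParsePositive(3), absl_testing::IsOkAndHolds(3));
  // Matches on status code and error message.
  EXPECT_THAT(ParsePositive(-1),
              absl_testing::StatusIs(absl::StatusCode::kInvalidArgument,
                                     ::testing::HasSubstr("positive")));
}

}  // namespace
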
diff --git a/absl/status/internal/statusor_internal.h b/absl/status/internal/statusor_internal.h
index 5be9490..6760315 100644
--- a/absl/status/internal/statusor_internal.h
+++ b/absl/status/internal/statusor_internal.h
@@ -123,11 +123,70 @@
         std::is_same<absl::in_place_t, absl::remove_cvref_t<U>>,
         IsForwardingAssignmentAmbiguous<T, U>>>>;
 
+template <bool Value, typename T>
+using Equality = std::conditional_t<Value, T, absl::negation<T>>;
+
+template <bool Explicit, typename T, typename U, bool Lifetimebound>
+using IsConstructionValid = absl::conjunction<
+    Equality<Lifetimebound,
+             type_traits_internal::IsLifetimeBoundAssignment<T, U>>,
+    IsDirectInitializationValid<T, U&&>, std::is_constructible<T, U&&>,
+    Equality<!Explicit, std::is_convertible<U&&, T>>,
+    absl::disjunction<
+        std::is_same<T, absl::remove_cvref_t<U>>,
+        absl::conjunction<
+            std::conditional_t<
+                Explicit,
+                absl::negation<std::is_constructible<absl::Status, U&&>>,
+                absl::negation<std::is_convertible<U&&, absl::Status>>>,
+            absl::negation<
+                internal_statusor::HasConversionOperatorToStatusOr<T, U&&>>>>>;
+
+template <typename T, typename U, bool Lifetimebound>
+using IsAssignmentValid = absl::conjunction<
+    Equality<Lifetimebound,
+             type_traits_internal::IsLifetimeBoundAssignment<T, U>>,
+    std::is_constructible<T, U&&>, std::is_assignable<T&, U&&>,
+    absl::disjunction<
+        std::is_same<T, absl::remove_cvref_t<U>>,
+        absl::conjunction<
+            absl::negation<std::is_convertible<U&&, absl::Status>>,
+            absl::negation<HasConversionOperatorToStatusOr<T, U&&>>>>,
+    IsForwardingAssignmentValid<T, U&&>>;
+
+template <bool Explicit, typename T, typename U>
+using IsConstructionFromStatusValid = absl::conjunction<
+    absl::negation<std::is_same<absl::StatusOr<T>, absl::remove_cvref_t<U>>>,
+    absl::negation<std::is_same<T, absl::remove_cvref_t<U>>>,
+    absl::negation<std::is_same<absl::in_place_t, absl::remove_cvref_t<U>>>,
+    Equality<!Explicit, std::is_convertible<U, absl::Status>>,
+    std::is_constructible<absl::Status, U>,
+    absl::negation<HasConversionOperatorToStatusOr<T, U>>>;
+
+template <bool Explicit, typename T, typename U, bool Lifetimebound,
+          typename UQ>
+using IsConstructionFromStatusOrValid = absl::conjunction<
+    absl::negation<std::is_same<T, U>>,
+    Equality<Lifetimebound,
+             type_traits_internal::IsLifetimeBoundAssignment<T, U>>,
+    std::is_constructible<T, UQ>,
+    Equality<!Explicit, std::is_convertible<UQ, T>>,
+    absl::negation<IsConstructibleOrConvertibleFromStatusOr<T, U>>>;
+
+template <typename T, typename U, bool Lifetimebound>
+using IsStatusOrAssignmentValid = absl::conjunction<
+    absl::negation<std::is_same<T, absl::remove_cvref_t<U>>>,
+    Equality<Lifetimebound,
+             type_traits_internal::IsLifetimeBoundAssignment<T, U>>,
+    std::is_constructible<T, U>, std::is_assignable<T, U>,
+    absl::negation<IsConstructibleOrConvertibleOrAssignableFromStatusOr<
+        T, absl::remove_cvref_t<U>>>>;
+
 class Helper {
  public:
   // Move type-agnostic error handling to the .cc.
   static void HandleInvalidStatusCtorArg(absl::Nonnull<Status*>);
-  ABSL_ATTRIBUTE_NORETURN static void Crash(const absl::Status& status);
+  [[noreturn]] static void Crash(const absl::Status& status);
 };
 
 // Construct an instance of T in `p` through placement new, passing Args... to
@@ -379,7 +438,7 @@
   MoveAssignBase& operator=(MoveAssignBase&&) = delete;
 };
 
-ABSL_ATTRIBUTE_NORETURN void ThrowBadStatusOrAccess(absl::Status status);
+[[noreturn]] void ThrowBadStatusOrAccess(absl::Status status);
 
 // Used to introduce jitter into the output of printing functions for
 // `StatusOr` (i.e. `AbslStringify` and `operator<<`).
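
The `Equality` alias added above selects between a trait and its negation based on a compile-time flag, and the new `Is*Valid` aliases are built on top of it. A minimal, self-contained sketch of that behavior (using a stand-in for `absl::negation` so it compiles without Abseil):

```
#include <type_traits>

// Stand-in for absl::negation, to keep the sketch dependency-free.
template <typename T>
struct Negation : std::integral_constant<bool, !T::value> {};

// Mirrors the `Equality` alias: keep the trait when Value is true,
// otherwise use its negation.
template <bool Value, typename T>
using Equality = std::conditional_t<Value, T, Negation<T>>;

// When the flag is true the trait is used as-is...
static_assert(Equality<true, std::is_integral<int>>::value, "kept as-is");
// ...and when it is false the trait is negated.
static_assert(Equality<false, std::is_integral<float>>::value, "negated");
```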
diff --git a/absl/status/status.cc b/absl/status/status.cc
index 4dd5ae0..745ab88 100644
--- a/absl/status/status.cc
+++ b/absl/status/status.cc
@@ -273,14 +273,12 @@
     case EFAULT:        // Bad address
     case EILSEQ:        // Illegal byte sequence
     case ENOPROTOOPT:   // Protocol not available
-    case ENOSTR:        // Not a STREAM
     case ENOTSOCK:      // Not a socket
     case ENOTTY:        // Inappropriate I/O control operation
     case EPROTOTYPE:    // Protocol wrong type for socket
     case ESPIPE:        // Invalid seek
       return StatusCode::kInvalidArgument;
     case ETIMEDOUT:  // Connection timed out
-    case ETIME:      // Timer expired
       return StatusCode::kDeadlineExceeded;
     case ENODEV:  // No such device
     case ENOENT:  // No such file or directory
@@ -339,9 +337,7 @@
     case EMLINK:   // Too many links
     case ENFILE:   // Too many open files in system
     case ENOBUFS:  // No buffer space available
-    case ENODATA:  // No message is available on the STREAM read queue
     case ENOMEM:   // Not enough space
-    case ENOSR:    // No STREAM resources
 #ifdef EUSERS
     case EUSERS:  // Too many users
 #endif
diff --git a/absl/status/status.h b/absl/status/status.h
index 9ce16db..6cfe49f 100644
--- a/absl/status/status.h
+++ b/absl/status/status.h
@@ -452,7 +452,7 @@
 
   // The moved-from state is valid but unspecified.
   Status(Status&&) noexcept;
-  Status& operator=(Status&&);
+  Status& operator=(Status&&) noexcept;
 
   ~Status();
 
@@ -539,7 +539,7 @@
   // swap()
   //
   // Swap the contents of one status with another.
-  friend void swap(Status& a, Status& b);
+  friend void swap(Status& a, Status& b) noexcept;
 
   //----------------------------------------------------------------------------
   // Payload Management APIs
@@ -789,7 +789,7 @@
   x.rep_ = MovedFromRep();
 }
 
-inline Status& Status::operator=(Status&& x) {
+inline Status& Status::operator=(Status&& x) noexcept {
   uintptr_t old_rep = rep_;
   if (x.rep_ != old_rep) {
     rep_ = x.rep_;
@@ -852,7 +852,7 @@
   // no-op
 }
 
-inline void swap(absl::Status& a, absl::Status& b) {
+inline void swap(absl::Status& a, absl::Status& b) noexcept {
   using std::swap;
   swap(a.rep_, b.rep_);
 }
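
With the move assignment and `swap()` now declared `noexcept`, standard type traits (and containers that consult them) see `absl::Status` as nothrow-movable. A small hedged check of that property:

```
#include <type_traits>

#include "absl/status/status.h"

static_assert(std::is_nothrow_move_constructible<absl::Status>::value,
              "move construction was already noexcept");
static_assert(std::is_nothrow_move_assignable<absl::Status>::value,
              "move assignment is noexcept after this change");
```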
diff --git a/absl/status/status_matchers.h b/absl/status/status_matchers.h
new file mode 100644
index 0000000..837660e
--- /dev/null
+++ b/absl/status/status_matchers.h
@@ -0,0 +1,118 @@
+// Copyright 2024 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: status_matchers.h
+// -----------------------------------------------------------------------------
+//
+// Testing utilities for working with `absl::Status` and `absl::StatusOr`.
+//
+// Defines the following utilities:
+//
+//   ===============
+//   `IsOkAndHolds(m)`
+//   ===============
+//
+//   This gMock matcher matches a StatusOr<T> value whose status is OK
+//   and whose inner value matches matcher m.  Example:
+//
+//   ```
+//   using ::testing::MatchesRegex;
+//   using ::absl_testing::IsOkAndHolds;
+//   ...
+//   absl::StatusOr<string> maybe_name = ...;
+//   EXPECT_THAT(maybe_name, IsOkAndHolds(MatchesRegex("John .*")));
+//   ```
+//
+//   ===============================
+//   `StatusIs(status_code_matcher)`
+//   ===============================
+//
+//   This is a shorthand for
+//     `StatusIs(status_code_matcher, ::testing::_)`
+//   In other words, it's like the two-argument `StatusIs()`, except that it
+//   ignores the error message.
+//
+//   ===============
+//   `IsOk()`
+//   ===============
+//
+//   Matches an `absl::Status` or `absl::StatusOr<T>` value whose status value
+//   is `absl::StatusCode::kOk`.
+//
+//   Equivalent to `StatusIs(absl::StatusCode::kOk)`.
+//   Example:
+//   ```
+//   using ::absl_testing::IsOk;
+//   ...
+//   absl::StatusOr<string> maybe_name = ...;
+//   EXPECT_THAT(maybe_name, IsOk());
+//   Status s = ...;
+//   EXPECT_THAT(s, IsOk());
+//   ```
+
+#ifndef ABSL_STATUS_STATUS_MATCHERS_H_
+#define ABSL_STATUS_STATUS_MATCHERS_H_
+
+#include <ostream>  // NOLINT
+#include <type_traits>
+#include <utility>
+
+#include "gmock/gmock.h"  // gmock_for_status_matchers.h
+#include "absl/base/config.h"
+#include "absl/status/internal/status_matchers.h"
+
+namespace absl_testing {
+ABSL_NAMESPACE_BEGIN
+
+// Returns a gMock matcher that matches a StatusOr<> whose status is
+// OK and whose value matches the inner matcher.
+template <typename InnerMatcherT>
+status_internal::IsOkAndHoldsMatcher<typename std::decay<InnerMatcherT>::type>
+IsOkAndHolds(InnerMatcherT&& inner_matcher) {
+  return status_internal::IsOkAndHoldsMatcher<
+      typename std::decay<InnerMatcherT>::type>(
+      std::forward<InnerMatcherT>(inner_matcher));
+}
+
+// Returns a gMock matcher that matches a Status or StatusOr<> whose status code
+// matches code_matcher and whose error message matches message_matcher.
+// Typically, code_matcher will be an absl::StatusCode, e.g.
+//
+// StatusIs(absl::StatusCode::kInvalidArgument, "...")
+template <typename StatusCodeMatcherT, typename StatusMessageMatcherT>
+status_internal::StatusIsMatcher StatusIs(
+    StatusCodeMatcherT&& code_matcher,
+    StatusMessageMatcherT&& message_matcher) {
+  return status_internal::StatusIsMatcher(
+      std::forward<StatusCodeMatcherT>(code_matcher),
+      std::forward<StatusMessageMatcherT>(message_matcher));
+}
+
+// Returns a gMock matcher that matches a Status or StatusOr<> whose status
+// code matches code_matcher.  See above for details.
+template <typename StatusCodeMatcherT>
+status_internal::StatusIsMatcher StatusIs(StatusCodeMatcherT&& code_matcher) {
+  return StatusIs(std::forward<StatusCodeMatcherT>(code_matcher), ::testing::_);
+}
+
+// Returns a gMock matcher that matches a Status or StatusOr<> which is OK.
+inline status_internal::IsOkMatcher IsOk() {
+  return status_internal::IsOkMatcher();
+}
+
+ABSL_NAMESPACE_END
+}  // namespace absl_testing
+
+#endif  // ABSL_STATUS_STATUS_MATCHERS_H_
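
For illustration, a minimal test sketch using the two-argument `StatusIs()` together with a gMock message matcher; `ParseAge` is a hypothetical function invented for this example:

```
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"

using ::absl_testing::StatusIs;
using ::testing::HasSubstr;

// Hypothetical function under test: rejects negative input.
absl::StatusOr<int> ParseAge(int raw) {
  if (raw < 0) return absl::InvalidArgumentError("age must be non-negative");
  return raw;
}

TEST(ParseAgeTest, RejectsNegativeInput) {
  // Both the status code and the error message are checked.
  EXPECT_THAT(ParseAge(-1), StatusIs(absl::StatusCode::kInvalidArgument,
                                     HasSubstr("non-negative")));
}
```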
diff --git a/absl/status/status_matchers_test.cc b/absl/status/status_matchers_test.cc
new file mode 100644
index 0000000..3af0305
--- /dev/null
+++ b/absl/status/status_matchers_test.cc
@@ -0,0 +1,119 @@
+// Copyright 2024 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: status_matchers_test.cc
+// -----------------------------------------------------------------------------
+#include "absl/status/status_matchers.h"
+
+#include "gmock/gmock.h"
+#include "gtest/gtest-spi.h"
+#include "gtest/gtest.h"
+#include "absl/status/status.h"
+#include "absl/status/statusor.h"
+#include "absl/strings/string_view.h"
+
+namespace {
+
+using ::absl_testing::IsOk;
+using ::absl_testing::IsOkAndHolds;
+using ::absl_testing::StatusIs;
+using ::testing::Gt;
+
+TEST(StatusMatcherTest, StatusIsOk) { EXPECT_THAT(absl::OkStatus(), IsOk()); }
+
+TEST(StatusMatcherTest, StatusOrIsOk) {
+  absl::StatusOr<int> ok_int = {0};
+  EXPECT_THAT(ok_int, IsOk());
+}
+
+TEST(StatusMatcherTest, StatusIsNotOk) {
+  absl::Status error = absl::UnknownError("Smigla");
+  EXPECT_NONFATAL_FAILURE(EXPECT_THAT(error, IsOk()), "Smigla");
+}
+
+TEST(StatusMatcherTest, StatusOrIsNotOk) {
+  absl::StatusOr<int> error = absl::UnknownError("Smigla");
+  EXPECT_NONFATAL_FAILURE(EXPECT_THAT(error, IsOk()), "Smigla");
+}
+
+TEST(StatusMatcherTest, IsOkAndHolds) {
+  absl::StatusOr<int> ok_int = {4};
+  absl::StatusOr<absl::string_view> ok_str = {"text"};
+  EXPECT_THAT(ok_int, IsOkAndHolds(4));
+  EXPECT_THAT(ok_int, IsOkAndHolds(Gt(0)));
+  EXPECT_THAT(ok_str, IsOkAndHolds("text"));
+}
+
+TEST(StatusMatcherTest, IsOkAndHoldsFailure) {
+  absl::StatusOr<int> ok_int = {502};
+  absl::StatusOr<int> error = absl::UnknownError("Smigla");
+  absl::StatusOr<absl::string_view> ok_str = {"actual"};
+  EXPECT_NONFATAL_FAILURE(EXPECT_THAT(ok_int, IsOkAndHolds(0)), "502");
+  EXPECT_NONFATAL_FAILURE(EXPECT_THAT(error, IsOkAndHolds(0)), "Smigla");
+  EXPECT_NONFATAL_FAILURE(EXPECT_THAT(ok_str, IsOkAndHolds("expected")),
+                          "actual");
+}
+
+TEST(StatusMatcherTest, StatusIs) {
+  absl::Status unknown = absl::UnknownError("unbekannt");
+  absl::Status invalid = absl::InvalidArgumentError("ungueltig");
+  EXPECT_THAT(absl::OkStatus(), StatusIs(absl::StatusCode::kOk));
+  EXPECT_THAT(absl::OkStatus(), StatusIs(0));
+  EXPECT_THAT(unknown, StatusIs(absl::StatusCode::kUnknown));
+  EXPECT_THAT(unknown, StatusIs(2));
+  EXPECT_THAT(unknown, StatusIs(absl::StatusCode::kUnknown, "unbekannt"));
+  EXPECT_THAT(invalid, StatusIs(absl::StatusCode::kInvalidArgument));
+  EXPECT_THAT(invalid, StatusIs(3));
+  EXPECT_THAT(invalid,
+              StatusIs(absl::StatusCode::kInvalidArgument, "ungueltig"));
+}
+
+TEST(StatusMatcherTest, StatusOrIs) {
+  absl::StatusOr<int> ok = {42};
+  absl::StatusOr<int> unknown = absl::UnknownError("unbekannt");
+  absl::StatusOr<absl::string_view> invalid =
+      absl::InvalidArgumentError("ungueltig");
+  EXPECT_THAT(ok, StatusIs(absl::StatusCode::kOk));
+  EXPECT_THAT(ok, StatusIs(0));
+  EXPECT_THAT(unknown, StatusIs(absl::StatusCode::kUnknown));
+  EXPECT_THAT(unknown, StatusIs(2));
+  EXPECT_THAT(unknown, StatusIs(absl::StatusCode::kUnknown, "unbekannt"));
+  EXPECT_THAT(invalid, StatusIs(absl::StatusCode::kInvalidArgument));
+  EXPECT_THAT(invalid, StatusIs(3));
+  EXPECT_THAT(invalid,
+              StatusIs(absl::StatusCode::kInvalidArgument, "ungueltig"));
+}
+
+TEST(StatusMatcherTest, StatusIsFailure) {
+  absl::Status unknown = absl::UnknownError("unbekannt");
+  absl::Status invalid = absl::InvalidArgumentError("ungueltig");
+  EXPECT_NONFATAL_FAILURE(
+      EXPECT_THAT(absl::OkStatus(),
+                  StatusIs(absl::StatusCode::kInvalidArgument)),
+      "OK");
+  EXPECT_NONFATAL_FAILURE(
+      EXPECT_THAT(unknown, StatusIs(absl::StatusCode::kCancelled)), "UNKNOWN");
+  EXPECT_NONFATAL_FAILURE(
+      EXPECT_THAT(unknown, StatusIs(absl::StatusCode::kUnknown, "inconnu")),
+      "unbekannt");
+  EXPECT_NONFATAL_FAILURE(
+      EXPECT_THAT(invalid, StatusIs(absl::StatusCode::kOutOfRange)), "INVALID");
+  EXPECT_NONFATAL_FAILURE(
+      EXPECT_THAT(invalid,
+                  StatusIs(absl::StatusCode::kInvalidArgument, "invalide")),
+      "ungueltig");
+}
+
+}  // namespace
diff --git a/absl/status/status_test.cc b/absl/status/status_test.cc
index 585e780..c3327ad 100644
--- a/absl/status/status_test.cc
+++ b/absl/status/status_test.cc
@@ -497,8 +497,8 @@
   {
     absl::Status status(absl::StatusCode::kInvalidArgument, "message");
     absl::Status copy(status);
-    status = static_cast<absl::Status&&>(status);
-    EXPECT_EQ(status, copy);
+    assignee = static_cast<absl::Status&&>(status);
+    EXPECT_EQ(assignee, copy);
   }
 }
 
diff --git a/absl/status/statusor.h b/absl/status/statusor.h
index cd35e5b..b1da45e 100644
--- a/absl/status/statusor.h
+++ b/absl/status/statusor.h
@@ -236,57 +236,55 @@
   // is explicit if and only if the corresponding construction of `T` from `U`
   // is explicit. (This constructor inherits its explicitness from the
   // underlying constructor.)
-  template <
-      typename U,
-      absl::enable_if_t<
-          absl::conjunction<
-              absl::negation<std::is_same<T, U>>,
-              std::is_constructible<T, const U&>,
-              std::is_convertible<const U&, T>,
-              absl::negation<
-                  internal_statusor::IsConstructibleOrConvertibleFromStatusOr<
-                      T, U>>>::value,
-          int> = 0>
+  template <typename U, absl::enable_if_t<
+                            internal_statusor::IsConstructionFromStatusOrValid<
+                                false, T, U, false, const U&>::value,
+                            int> = 0>
   StatusOr(const StatusOr<U>& other)  // NOLINT
       : Base(static_cast<const typename StatusOr<U>::Base&>(other)) {}
-  template <
-      typename U,
-      absl::enable_if_t<
-          absl::conjunction<
-              absl::negation<std::is_same<T, U>>,
-              std::is_constructible<T, const U&>,
-              absl::negation<std::is_convertible<const U&, T>>,
-              absl::negation<
-                  internal_statusor::IsConstructibleOrConvertibleFromStatusOr<
-                      T, U>>>::value,
-          int> = 0>
+  template <typename U, absl::enable_if_t<
+                            internal_statusor::IsConstructionFromStatusOrValid<
+                                false, T, U, true, const U&>::value,
+                            int> = 0>
+  StatusOr(const StatusOr<U>& other ABSL_ATTRIBUTE_LIFETIME_BOUND)  // NOLINT
+      : Base(static_cast<const typename StatusOr<U>::Base&>(other)) {}
+  template <typename U, absl::enable_if_t<
+                            internal_statusor::IsConstructionFromStatusOrValid<
+                                true, T, U, false, const U&>::value,
+                            int> = 0>
   explicit StatusOr(const StatusOr<U>& other)
       : Base(static_cast<const typename StatusOr<U>::Base&>(other)) {}
+  template <typename U, absl::enable_if_t<
+                            internal_statusor::IsConstructionFromStatusOrValid<
+                                true, T, U, true, const U&>::value,
+                            int> = 0>
+  explicit StatusOr(const StatusOr<U>& other ABSL_ATTRIBUTE_LIFETIME_BOUND)
+      : Base(static_cast<const typename StatusOr<U>::Base&>(other)) {}
 
-  template <
-      typename U,
-      absl::enable_if_t<
-          absl::conjunction<
-              absl::negation<std::is_same<T, U>>, std::is_constructible<T, U&&>,
-              std::is_convertible<U&&, T>,
-              absl::negation<
-                  internal_statusor::IsConstructibleOrConvertibleFromStatusOr<
-                      T, U>>>::value,
-          int> = 0>
+  template <typename U, absl::enable_if_t<
+                            internal_statusor::IsConstructionFromStatusOrValid<
+                                false, T, U, false, U&&>::value,
+                            int> = 0>
   StatusOr(StatusOr<U>&& other)  // NOLINT
       : Base(static_cast<typename StatusOr<U>::Base&&>(other)) {}
-  template <
-      typename U,
-      absl::enable_if_t<
-          absl::conjunction<
-              absl::negation<std::is_same<T, U>>, std::is_constructible<T, U&&>,
-              absl::negation<std::is_convertible<U&&, T>>,
-              absl::negation<
-                  internal_statusor::IsConstructibleOrConvertibleFromStatusOr<
-                      T, U>>>::value,
-          int> = 0>
+  template <typename U, absl::enable_if_t<
+                            internal_statusor::IsConstructionFromStatusOrValid<
+                                false, T, U, true, U&&>::value,
+                            int> = 0>
+  StatusOr(StatusOr<U>&& other ABSL_ATTRIBUTE_LIFETIME_BOUND)  // NOLINT
+      : Base(static_cast<typename StatusOr<U>::Base&&>(other)) {}
+  template <typename U, absl::enable_if_t<
+                            internal_statusor::IsConstructionFromStatusOrValid<
+                                true, T, U, false, U&&>::value,
+                            int> = 0>
   explicit StatusOr(StatusOr<U>&& other)
       : Base(static_cast<typename StatusOr<U>::Base&&>(other)) {}
+  template <typename U, absl::enable_if_t<
+                            internal_statusor::IsConstructionFromStatusOrValid<
+                                true, T, U, true, U&&>::value,
+                            int> = 0>
+  explicit StatusOr(StatusOr<U>&& other ABSL_ATTRIBUTE_LIFETIME_BOUND)
+      : Base(static_cast<typename StatusOr<U>::Base&&>(other)) {}
 
   // Converting Assignment Operators
 
@@ -307,37 +305,38 @@
   // These overloads only apply if `absl::StatusOr<T>` is constructible and
   // assignable from `absl::StatusOr<U>` and `StatusOr<T>` cannot be directly
   // assigned from `StatusOr<U>`.
-  template <
-      typename U,
-      absl::enable_if_t<
-          absl::conjunction<
-              absl::negation<std::is_same<T, U>>,
-              std::is_constructible<T, const U&>,
-              std::is_assignable<T, const U&>,
-              absl::negation<
-                  internal_statusor::
-                      IsConstructibleOrConvertibleOrAssignableFromStatusOr<
-                          T, U>>>::value,
-          int> = 0>
+  template <typename U,
+            absl::enable_if_t<internal_statusor::IsStatusOrAssignmentValid<
+                                  T, const U&, false>::value,
+                              int> = 0>
   StatusOr& operator=(const StatusOr<U>& other) {
     this->Assign(other);
     return *this;
   }
-  template <
-      typename U,
-      absl::enable_if_t<
-          absl::conjunction<
-              absl::negation<std::is_same<T, U>>, std::is_constructible<T, U&&>,
-              std::is_assignable<T, U&&>,
-              absl::negation<
-                  internal_statusor::
-                      IsConstructibleOrConvertibleOrAssignableFromStatusOr<
-                          T, U>>>::value,
-          int> = 0>
+  template <typename U,
+            absl::enable_if_t<internal_statusor::IsStatusOrAssignmentValid<
+                                  T, const U&, true>::value,
+                              int> = 0>
+  StatusOr& operator=(const StatusOr<U>& other ABSL_ATTRIBUTE_LIFETIME_BOUND) {
+    this->Assign(other);
+    return *this;
+  }
+  template <typename U,
+            absl::enable_if_t<internal_statusor::IsStatusOrAssignmentValid<
+                                  T, U&&, false>::value,
+                              int> = 0>
   StatusOr& operator=(StatusOr<U>&& other) {
     this->Assign(std::move(other));
     return *this;
   }
+  template <typename U,
+            absl::enable_if_t<internal_statusor::IsStatusOrAssignmentValid<
+                                  T, U&&, true>::value,
+                              int> = 0>
+  StatusOr& operator=(StatusOr<U>&& other ABSL_ATTRIBUTE_LIFETIME_BOUND) {
+    this->Assign(std::move(other));
+    return *this;
+  }
 
   // Constructs a new `absl::StatusOr<T>` with a non-ok status. After calling
   // this constructor, `this->ok()` will be `false` and calls to `value()` will
@@ -350,46 +349,21 @@
   // REQUIRES: !Status(std::forward<U>(v)).ok(). This requirement is DCHECKed.
   // In optimized builds, passing absl::OkStatus() here will have the effect
   // of passing absl::StatusCode::kInternal as a fallback.
-  template <
-      typename U = absl::Status,
-      absl::enable_if_t<
-          absl::conjunction<
-              std::is_convertible<U&&, absl::Status>,
-              std::is_constructible<absl::Status, U&&>,
-              absl::negation<std::is_same<absl::decay_t<U>, absl::StatusOr<T>>>,
-              absl::negation<std::is_same<absl::decay_t<U>, T>>,
-              absl::negation<std::is_same<absl::decay_t<U>, absl::in_place_t>>,
-              absl::negation<internal_statusor::HasConversionOperatorToStatusOr<
-                  T, U&&>>>::value,
-          int> = 0>
+  template <typename U = absl::Status,
+            absl::enable_if_t<internal_statusor::IsConstructionFromStatusValid<
+                                  false, T, U>::value,
+                              int> = 0>
   StatusOr(U&& v) : Base(std::forward<U>(v)) {}
 
-  template <
-      typename U = absl::Status,
-      absl::enable_if_t<
-          absl::conjunction<
-              absl::negation<std::is_convertible<U&&, absl::Status>>,
-              std::is_constructible<absl::Status, U&&>,
-              absl::negation<std::is_same<absl::decay_t<U>, absl::StatusOr<T>>>,
-              absl::negation<std::is_same<absl::decay_t<U>, T>>,
-              absl::negation<std::is_same<absl::decay_t<U>, absl::in_place_t>>,
-              absl::negation<internal_statusor::HasConversionOperatorToStatusOr<
-                  T, U&&>>>::value,
-          int> = 0>
+  template <typename U = absl::Status,
+            absl::enable_if_t<internal_statusor::IsConstructionFromStatusValid<
+                                  true, T, U>::value,
+                              int> = 0>
   explicit StatusOr(U&& v) : Base(std::forward<U>(v)) {}
-
-  template <
-      typename U = absl::Status,
-      absl::enable_if_t<
-          absl::conjunction<
-              std::is_convertible<U&&, absl::Status>,
-              std::is_constructible<absl::Status, U&&>,
-              absl::negation<std::is_same<absl::decay_t<U>, absl::StatusOr<T>>>,
-              absl::negation<std::is_same<absl::decay_t<U>, T>>,
-              absl::negation<std::is_same<absl::decay_t<U>, absl::in_place_t>>,
-              absl::negation<internal_statusor::HasConversionOperatorToStatusOr<
-                  T, U&&>>>::value,
-          int> = 0>
+  template <typename U = absl::Status,
+            absl::enable_if_t<internal_statusor::IsConstructionFromStatusValid<
+                                  false, T, U>::value,
+                              int> = 0>
   StatusOr& operator=(U&& v) {
     this->AssignStatus(std::forward<U>(v));
     return *this;
@@ -411,21 +385,22 @@
   //    StatusOr<bool> s1 = true;  // s1.ok() && *s1 == true
   //    StatusOr<bool> s2 = false;  // s2.ok() && *s2 == false
   //    s1 = s2;  // ambiguous, `s1 = *s2` or `s1 = bool(s2)`?
-  template <
-      typename U = T,
-      typename = typename std::enable_if<absl::conjunction<
-          std::is_constructible<T, U&&>, std::is_assignable<T&, U&&>,
-          absl::disjunction<
-              std::is_same<absl::remove_cvref_t<U>, T>,
-              absl::conjunction<
-                  absl::negation<std::is_convertible<U&&, absl::Status>>,
-                  absl::negation<internal_statusor::
-                                     HasConversionOperatorToStatusOr<T, U&&>>>>,
-          internal_statusor::IsForwardingAssignmentValid<T, U&&>>::value>::type>
+  template <typename U = T,
+            typename std::enable_if<
+                internal_statusor::IsAssignmentValid<T, U, false>::value,
+                int>::type = 0>
   StatusOr& operator=(U&& v) {
     this->Assign(std::forward<U>(v));
     return *this;
   }
+  template <typename U = T,
+            typename std::enable_if<
+                internal_statusor::IsAssignmentValid<T, U, true>::value,
+                int>::type = 0>
+  StatusOr& operator=(U&& v ABSL_ATTRIBUTE_LIFETIME_BOUND) {
+    this->Assign(std::forward<U>(v));
+    return *this;
+  }
 
   // Constructs the inner value `T` in-place using the provided args, using the
   // `T(args...)` constructor.
@@ -442,40 +417,31 @@
   // This constructor is explicit if `U` is not convertible to `T`. To avoid
   // ambiguity, this constructor is disabled if `U` is a `StatusOr<J>`, where
   // `J` is convertible to `T`.
-  template <
-      typename U = T,
-      absl::enable_if_t<
-          absl::conjunction<
-              internal_statusor::IsDirectInitializationValid<T, U&&>,
-              std::is_constructible<T, U&&>, std::is_convertible<U&&, T>,
-              absl::disjunction<
-                  std::is_same<absl::remove_cvref_t<U>, T>,
-                  absl::conjunction<
-                      absl::negation<std::is_convertible<U&&, absl::Status>>,
-                      absl::negation<
-                          internal_statusor::HasConversionOperatorToStatusOr<
-                              T, U&&>>>>>::value,
-          int> = 0>
+  template <typename U = T,
+            absl::enable_if_t<internal_statusor::IsConstructionValid<
+                                  false, T, U, false>::value,
+                              int> = 0>
   StatusOr(U&& u)  // NOLINT
       : StatusOr(absl::in_place, std::forward<U>(u)) {}
+  template <typename U = T,
+            absl::enable_if_t<internal_statusor::IsConstructionValid<
+                                  false, T, U, true>::value,
+                              int> = 0>
+  StatusOr(U&& u ABSL_ATTRIBUTE_LIFETIME_BOUND)  // NOLINT
+      : StatusOr(absl::in_place, std::forward<U>(u)) {}
 
-  template <
-      typename U = T,
-      absl::enable_if_t<
-          absl::conjunction<
-              internal_statusor::IsDirectInitializationValid<T, U&&>,
-              absl::disjunction<
-                  std::is_same<absl::remove_cvref_t<U>, T>,
-                  absl::conjunction<
-                      absl::negation<std::is_constructible<absl::Status, U&&>>,
-                      absl::negation<
-                          internal_statusor::HasConversionOperatorToStatusOr<
-                              T, U&&>>>>,
-              std::is_constructible<T, U&&>,
-              absl::negation<std::is_convertible<U&&, T>>>::value,
-          int> = 0>
+  template <typename U = T,
+            absl::enable_if_t<internal_statusor::IsConstructionValid<
+                                  true, T, U, false>::value,
+                              int> = 0>
   explicit StatusOr(U&& u)  // NOLINT
       : StatusOr(absl::in_place, std::forward<U>(u)) {}
+  template <typename U = T,
+            absl::enable_if_t<
+                internal_statusor::IsConstructionValid<true, T, U, true>::value,
+                int> = 0>
+  explicit StatusOr(U&& u ABSL_ATTRIBUTE_LIFETIME_BOUND)  // NOLINT
+      : StatusOr(absl::in_place, std::forward<U>(u)) {}
 
   // StatusOr<T>::ok()
   //
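
The overloads annotated with `ABSL_ATTRIBUTE_LIFETIME_BOUND` above give supporting compilers a chance to flag conversions whose result would dangle. A hedged sketch of the kind of code they are aimed at (whether a warning is actually emitted depends on the toolchain):

```
#include <string>

#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"

absl::StatusOr<absl::string_view> MakeView() {
  std::string temp = "short-lived";
  // The StatusOr<string_view> is built directly from `temp`, so the stored
  // view would dangle once `temp` is destroyed at the end of the function.
  // The lifetime-bound overloads let a supporting compiler diagnose this.
  return temp;  // suspicious: dangling view after return
}
```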
diff --git a/absl/status/statusor_test.cc b/absl/status/statusor_test.cc
index 09ffc65..8341040 100644
--- a/absl/status/statusor_test.cc
+++ b/absl/status/statusor_test.cc
@@ -31,6 +31,7 @@
 #include "absl/base/casts.h"
 #include "absl/memory/memory.h"
 #include "absl/status/status.h"
+#include "absl/status/status_matchers.h"
 #include "absl/strings/str_cat.h"
 #include "absl/strings/string_view.h"
 #include "absl/types/any.h"
@@ -39,6 +40,8 @@
 
 namespace {
 
+using ::absl_testing::IsOk;
+using ::absl_testing::IsOkAndHolds;
 using ::testing::AllOf;
 using ::testing::AnyOf;
 using ::testing::AnyWith;
@@ -52,128 +55,6 @@
 using ::testing::StartsWith;
 using ::testing::VariantWith;
 
-#ifdef GTEST_HAS_STATUS_MATCHERS
-using ::testing::status::IsOk;
-using ::testing::status::IsOkAndHolds;
-#else  // GTEST_HAS_STATUS_MATCHERS
-inline const ::absl::Status& GetStatus(const ::absl::Status& status) {
-  return status;
-}
-
-template <typename T>
-inline const ::absl::Status& GetStatus(const ::absl::StatusOr<T>& status) {
-  return status.status();
-}
-
-// Monomorphic implementation of matcher IsOkAndHolds(m).  StatusOrType is a
-// reference to StatusOr<T>.
-template <typename StatusOrType>
-class IsOkAndHoldsMatcherImpl
-    : public ::testing::MatcherInterface<StatusOrType> {
- public:
-  typedef
-      typename std::remove_reference<StatusOrType>::type::value_type value_type;
-
-  template <typename InnerMatcher>
-  explicit IsOkAndHoldsMatcherImpl(InnerMatcher&& inner_matcher)
-      : inner_matcher_(::testing::SafeMatcherCast<const value_type&>(
-            std::forward<InnerMatcher>(inner_matcher))) {}
-
-  void DescribeTo(std::ostream* os) const override {
-    *os << "is OK and has a value that ";
-    inner_matcher_.DescribeTo(os);
-  }
-
-  void DescribeNegationTo(std::ostream* os) const override {
-    *os << "isn't OK or has a value that ";
-    inner_matcher_.DescribeNegationTo(os);
-  }
-
-  bool MatchAndExplain(
-      StatusOrType actual_value,
-      ::testing::MatchResultListener* result_listener) const override {
-    if (!actual_value.ok()) {
-      *result_listener << "which has status " << actual_value.status();
-      return false;
-    }
-
-    ::testing::StringMatchResultListener inner_listener;
-    const bool matches =
-        inner_matcher_.MatchAndExplain(*actual_value, &inner_listener);
-    const std::string inner_explanation = inner_listener.str();
-    if (!inner_explanation.empty()) {
-      *result_listener << "which contains value "
-                       << ::testing::PrintToString(*actual_value) << ", "
-                       << inner_explanation;
-    }
-    return matches;
-  }
-
- private:
-  const ::testing::Matcher<const value_type&> inner_matcher_;
-};
-
-// Implements IsOkAndHolds(m) as a polymorphic matcher.
-template <typename InnerMatcher>
-class IsOkAndHoldsMatcher {
- public:
-  explicit IsOkAndHoldsMatcher(InnerMatcher inner_matcher)
-      : inner_matcher_(std::move(inner_matcher)) {}
-
-  // Converts this polymorphic matcher to a monomorphic matcher of the
-  // given type.  StatusOrType can be either StatusOr<T> or a
-  // reference to StatusOr<T>.
-  template <typename StatusOrType>
-  operator ::testing::Matcher<StatusOrType>() const {  // NOLINT
-    return ::testing::Matcher<StatusOrType>(
-        new IsOkAndHoldsMatcherImpl<const StatusOrType&>(inner_matcher_));
-  }
-
- private:
-  const InnerMatcher inner_matcher_;
-};
-
-// Monomorphic implementation of matcher IsOk() for a given type T.
-// T can be Status, StatusOr<>, or a reference to either of them.
-template <typename T>
-class MonoIsOkMatcherImpl : public ::testing::MatcherInterface<T> {
- public:
-  void DescribeTo(std::ostream* os) const override { *os << "is OK"; }
-  void DescribeNegationTo(std::ostream* os) const override {
-    *os << "is not OK";
-  }
-  bool MatchAndExplain(T actual_value,
-                       ::testing::MatchResultListener*) const override {
-    return GetStatus(actual_value).ok();
-  }
-};
-
-// Implements IsOk() as a polymorphic matcher.
-class IsOkMatcher {
- public:
-  template <typename T>
-  operator ::testing::Matcher<T>() const {  // NOLINT
-    return ::testing::Matcher<T>(new MonoIsOkMatcherImpl<T>());
-  }
-};
-
-// Macros for testing the results of functions that return absl::Status or
-// absl::StatusOr<T> (for any type T).
-#define EXPECT_OK(expression) EXPECT_THAT(expression, IsOk())
-
-// Returns a gMock matcher that matches a StatusOr<> whose status is
-// OK and whose value matches the inner matcher.
-template <typename InnerMatcher>
-IsOkAndHoldsMatcher<typename std::decay<InnerMatcher>::type> IsOkAndHolds(
-    InnerMatcher&& inner_matcher) {
-  return IsOkAndHoldsMatcher<typename std::decay<InnerMatcher>::type>(
-      std::forward<InnerMatcher>(inner_matcher));
-}
-
-// Returns a gMock matcher that matches a Status or StatusOr<> which is OK.
-inline IsOkMatcher IsOk() { return IsOkMatcher(); }
-#endif  // GTEST_HAS_STATUS_MATCHERS
-
 struct CopyDetector {
   CopyDetector() = default;
   explicit CopyDetector(int xx) : x(xx) {}
@@ -527,7 +408,7 @@
   const int kI = 4;
   const absl::StatusOr<int> original(kI);
   const absl::StatusOr<int> copy(original);
-  EXPECT_OK(copy.status());
+  EXPECT_THAT(copy.status(), IsOk());
   EXPECT_EQ(*original, *copy);
 }
 
@@ -542,7 +423,7 @@
   CopyNoAssign value(kI);
   absl::StatusOr<CopyNoAssign> original(value);
   absl::StatusOr<CopyNoAssign> copy(original);
-  EXPECT_OK(copy.status());
+  EXPECT_THAT(copy.status(), IsOk());
   EXPECT_EQ(original->foo, copy->foo);
 }
 
@@ -550,7 +431,7 @@
   const int kI = 4;
   absl::StatusOr<int> original(kI);
   absl::StatusOr<double> copy(original);
-  EXPECT_OK(copy.status());
+  EXPECT_THAT(copy.status(), IsOk());
   EXPECT_DOUBLE_EQ(*original, *copy);
 }
 
@@ -570,11 +451,11 @@
     target = source;
 
     ASSERT_TRUE(target.ok());
-    EXPECT_OK(target.status());
+    EXPECT_THAT(target.status(), IsOk());
     EXPECT_EQ(p, *target);
 
     ASSERT_TRUE(source.ok());
-    EXPECT_OK(source.status());
+    EXPECT_THAT(source.status(), IsOk());
     EXPECT_EQ(p, *source);
   }
 
@@ -587,11 +468,11 @@
     target = std::move(source);
 
     ASSERT_TRUE(target.ok());
-    EXPECT_OK(target.status());
+    EXPECT_THAT(target.status(), IsOk());
     EXPECT_EQ(p, *target);
 
     ASSERT_TRUE(source.ok());
-    EXPECT_OK(source.status());
+    EXPECT_THAT(source.status(), IsOk());
     EXPECT_EQ(nullptr, *source);
   }
 }
@@ -638,11 +519,11 @@
     target = source;
 
     ASSERT_TRUE(target.ok());
-    EXPECT_OK(target.status());
+    EXPECT_THAT(target.status(), IsOk());
     EXPECT_DOUBLE_EQ(kI, *target);
 
     ASSERT_TRUE(source.ok());
-    EXPECT_OK(source.status());
+    EXPECT_THAT(source.status(), IsOk());
     EXPECT_DOUBLE_EQ(kI, *source);
   }
 
@@ -655,11 +536,11 @@
     target = std::move(source);
 
     ASSERT_TRUE(target.ok());
-    EXPECT_OK(target.status());
+    EXPECT_THAT(target.status(), IsOk());
     EXPECT_EQ(p, target->get());
 
     ASSERT_TRUE(source.ok());
-    EXPECT_OK(source.status());
+    EXPECT_THAT(source.status(), IsOk());
     EXPECT_EQ(nullptr, source->get());
   }
 }
@@ -1078,7 +959,7 @@
     so = *&so;
 
     ASSERT_TRUE(so.ok());
-    EXPECT_OK(so.status());
+    EXPECT_THAT(so.status(), IsOk());
     EXPECT_EQ(long_str, *so);
   }
 
@@ -1101,7 +982,7 @@
     so = std::move(same);
 
     ASSERT_TRUE(so.ok());
-    EXPECT_OK(so.status());
+    EXPECT_THAT(so.status(), IsOk());
     EXPECT_EQ(17, *so);
   }
 
@@ -1128,7 +1009,7 @@
     so = std::move(same);
 
     ASSERT_TRUE(so.ok());
-    EXPECT_OK(so.status());
+    EXPECT_THAT(so.status(), IsOk());
     EXPECT_EQ(raw, so->get());
   }
 
@@ -1361,7 +1242,7 @@
   {
     absl::StatusOr<const int*> so(&kI);
     EXPECT_TRUE(so.ok());
-    EXPECT_OK(so.status());
+    EXPECT_THAT(so.status(), IsOk());
     EXPECT_EQ(&kI, *so);
   }
 
@@ -1369,7 +1250,7 @@
   {
     absl::StatusOr<const int*> so(nullptr);
     EXPECT_TRUE(so.ok());
-    EXPECT_OK(so.status());
+    EXPECT_THAT(so.status(), IsOk());
     EXPECT_EQ(nullptr, *so);
   }
 
@@ -1379,7 +1260,7 @@
 
     absl::StatusOr<const int*> so(p);
     EXPECT_TRUE(so.ok());
-    EXPECT_OK(so.status());
+    EXPECT_THAT(so.status(), IsOk());
     EXPECT_EQ(nullptr, *so);
   }
 }
@@ -1388,7 +1269,7 @@
   const int kI = 0;
   absl::StatusOr<const int*> original(&kI);
   absl::StatusOr<const int*> copy(original);
-  EXPECT_OK(copy.status());
+  EXPECT_THAT(copy.status(), IsOk());
   EXPECT_EQ(*original, *copy);
 }
 
@@ -1402,7 +1283,7 @@
   Derived derived;
   absl::StatusOr<Derived*> original(&derived);
   absl::StatusOr<Base2*> copy(original);
-  EXPECT_OK(copy.status());
+  EXPECT_THAT(copy.status(), IsOk());
   EXPECT_EQ(static_cast<const Base2*>(*original), *copy);
 }
 
@@ -1417,7 +1298,7 @@
   absl::StatusOr<const int*> source(&kI);
   absl::StatusOr<const int*> target;
   target = source;
-  EXPECT_OK(target.status());
+  EXPECT_THAT(target.status(), IsOk());
   EXPECT_EQ(*source, *target);
 }
 
@@ -1433,7 +1314,7 @@
   absl::StatusOr<Derived*> source(&derived);
   absl::StatusOr<Base2*> target;
   target = source;
-  EXPECT_OK(target.status());
+  EXPECT_THAT(target.status(), IsOk());
   EXPECT_EQ(static_cast<const Base2*>(*source), *target);
 }
 
diff --git a/absl/strings/BUILD.bazel b/absl/strings/BUILD.bazel
index 8e8371b..2cc014e 100644
--- a/absl/strings/BUILD.bazel
+++ b/absl/strings/BUILD.bazel
@@ -77,7 +77,6 @@
         "escaping.h",
         "has_absl_stringify.h",
         "internal/damerau_levenshtein_distance.h",
-        "internal/has_absl_stringify.h",
         "internal/string_constant.h",
         "match.h",
         "numbers.h",
@@ -359,6 +358,7 @@
         "//absl/base:config",
         "//absl/base:core_headers",
         "//absl/base:dynamic_annotations",
+        "//absl/meta:type_traits",
         "@com_google_googletest//:gtest",
         "@com_google_googletest//:gtest_main",
     ],
@@ -599,6 +599,7 @@
         "//absl/functional:function_ref",
         "//absl/meta:type_traits",
         "//absl/numeric:bits",
+        "//absl/types:compare",
         "//absl/types:optional",
         "//absl/types:span",
     ],
@@ -614,8 +615,8 @@
         "//absl:__subpackages__",
     ],
     deps = [
-        "//absl/base",
         "//absl/base:config",
+        "//absl/base:no_destructor",
         "//absl/base:raw_logging_internal",
         "//absl/synchronization",
     ],
@@ -829,7 +830,7 @@
 
 cc_library(
     name = "cord_test_helpers",
-    testonly = 1,
+    testonly = True,
     hdrs = [
         "cord_test_helpers.h",
     ],
@@ -845,7 +846,7 @@
 
 cc_library(
     name = "cord_rep_test_util",
-    testonly = 1,
+    testonly = True,
     hdrs = ["internal/cord_rep_test_util.h"],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -859,7 +860,7 @@
 
 cc_library(
     name = "cordz_test_helpers",
-    testonly = 1,
+    testonly = True,
     hdrs = ["cordz_test_helpers.h"],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -899,6 +900,7 @@
 cc_test(
     name = "cord_test",
     size = "medium",
+    timeout = "long",
     srcs = ["cord_test.cc"],
     copts = ABSL_TEST_COPTS,
     visibility = ["//visibility:private"],
@@ -915,12 +917,15 @@
         "//absl/base:config",
         "//absl/base:core_headers",
         "//absl/base:endian",
+        "//absl/base:no_destructor",
         "//absl/container:fixed_array",
         "//absl/functional:function_ref",
         "//absl/hash",
+        "//absl/hash:hash_testing",
         "//absl/log",
         "//absl/log:check",
         "//absl/random",
+        "//absl/types:compare",
         "//absl/types:optional",
         "@com_google_googletest//:gtest",
         "@com_google_googletest//:gtest_main",
@@ -1376,6 +1381,7 @@
 cc_test(
     name = "str_format_convert_test",
     size = "medium",
+    timeout = "long",
     srcs = ["internal/str_format/convert_test.cc"],
     copts = ABSL_TEST_COPTS,
     visibility = ["//visibility:private"],
@@ -1448,7 +1454,7 @@
 
 cc_binary(
     name = "atod_manual_test",
-    testonly = 1,
+    testonly = True,
     srcs = ["atod_manual_test.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
diff --git a/absl/strings/CMakeLists.txt b/absl/strings/CMakeLists.txt
index 1b24536..3a1619e 100644
--- a/absl/strings/CMakeLists.txt
+++ b/absl/strings/CMakeLists.txt
@@ -18,9 +18,9 @@
   NAME
     string_view
   HDRS
-    string_view.h
+    "string_view.h"
   SRCS
-    string_view.cc
+    "string_view.cc"
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
@@ -42,7 +42,6 @@
     "has_absl_stringify.h"
     "internal/damerau_levenshtein_distance.h"
     "internal/string_constant.h"
-    "internal/has_absl_stringify.h"
     "match.h"
     "numbers.h"
     "str_cat.h"
@@ -274,6 +273,7 @@
     absl::config
     absl::core_headers
     absl::dynamic_annotations
+    absl::type_traits
     GTest::gmock_main
 )
 
@@ -704,6 +704,7 @@
     absl::compressed_tuple
     absl::config
     absl::container_memory
+    absl::compare
     absl::core_headers
     absl::crc_cord_state
     absl::endian
@@ -799,6 +800,7 @@
   DEPS
     absl::base
     absl::config
+    absl::no_destructor
     absl::raw_logging_internal
     absl::synchronization
 )
@@ -1072,6 +1074,8 @@
     absl::fixed_array
     absl::function_ref
     absl::hash
+    absl::hash_testing
+    absl::no_destructor
     absl::log
     absl::optional
     absl::random_random
diff --git a/absl/strings/ascii.cc b/absl/strings/ascii.cc
index 5460b2c..20a696a 100644
--- a/absl/strings/ascii.cc
+++ b/absl/strings/ascii.cc
@@ -15,13 +15,14 @@
 #include "absl/strings/ascii.h"
 
 #include <climits>
-#include <cstdint>
+#include <cstddef>
 #include <cstring>
 #include <string>
-#include <type_traits>
 
+#include "absl/base/attributes.h"
 #include "absl/base/config.h"
 #include "absl/base/nullability.h"
+#include "absl/base/optimization.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -162,19 +163,6 @@
 };
 // clang-format on
 
-template <class T>
-static constexpr T BroadcastByte(unsigned char value) {
-  static_assert(std::is_integral<T>::value && sizeof(T) <= sizeof(uint64_t) &&
-                    std::is_unsigned<T>::value,
-                "only unsigned integers up to 64-bit allowed");
-  T result = value;
-  constexpr size_t result_bit_width = sizeof(result) * CHAR_BIT;
-  result |= result << ((CHAR_BIT << 0) & (result_bit_width - 1));
-  result |= result << ((CHAR_BIT << 1) & (result_bit_width - 1));
-  result |= result << ((CHAR_BIT << 2) & (result_bit_width - 1));
-  return result;
-}
-
 // Returns whether `c` is in the a-z/A-Z range (w.r.t. `ToUpper`).
 // Implemented by:
 //  1. Pushing the a-z/A-Z range to [SCHAR_MIN, SCHAR_MIN + 26).
@@ -189,47 +177,10 @@
   return static_cast<signed char>(u) < threshold;
 }
 
+// Force-inline so the compiler won't merge the short and long implementations.
 template <bool ToUpper>
-static constexpr char* PartialAsciiStrCaseFold(absl::Nonnull<char*> p,
-                                               absl::Nonnull<char*> end) {
-  using vec_t = size_t;
-  const size_t n = static_cast<size_t>(end - p);
-
-  // SWAR algorithm: http://0x80.pl/notesen/2016-01-06-swar-swap-case.html
-  constexpr char ch_a = ToUpper ? 'a' : 'A', ch_z = ToUpper ? 'z' : 'Z';
-  char* const swar_end = p + (n / sizeof(vec_t)) * sizeof(vec_t);
-  while (p < swar_end) {
-    vec_t v = vec_t();
-
-    // memcpy the vector, but constexpr
-    for (size_t i = 0; i < sizeof(vec_t); ++i) {
-      v |= static_cast<vec_t>(static_cast<unsigned char>(p[i]))
-           << (i * CHAR_BIT);
-    }
-
-    constexpr unsigned int msb = 1u << (CHAR_BIT - 1);
-    const vec_t v_msb = v & BroadcastByte<vec_t>(msb);
-    const vec_t v_nonascii_mask = (v_msb << 1) - (v_msb >> (CHAR_BIT - 1));
-    const vec_t v_nonascii = v & v_nonascii_mask;
-    const vec_t v_ascii = v & ~v_nonascii_mask;
-    const vec_t a = v_ascii + BroadcastByte<vec_t>(msb - ch_a - 0),
-                z = v_ascii + BroadcastByte<vec_t>(msb - ch_z - 1);
-    v = v_nonascii | (v_ascii ^ ((a ^ z) & BroadcastByte<vec_t>(msb)) >> 2);
-
-    // memcpy the vector, but constexpr
-    for (size_t i = 0; i < sizeof(vec_t); ++i) {
-      p[i] = static_cast<char>(v >> (i * CHAR_BIT));
-    }
-
-    p += sizeof(v);
-  }
-
-  return p;
-}
-
-template <bool ToUpper>
-static constexpr void AsciiStrCaseFold(absl::Nonnull<char*> p,
-                                       absl::Nonnull<char*> end) {
+ABSL_ATTRIBUTE_ALWAYS_INLINE inline constexpr void AsciiStrCaseFoldImpl(
+    absl::Nonnull<char*> p, size_t size) {
   // The upper- and lowercase versions of ASCII characters differ by only 1 bit.
   // When we need to flip the case, we can xor with this bit to achieve the
   // desired result. Note that the choice of 'a' and 'A' here is arbitrary. We
@@ -237,20 +188,32 @@
   // have the same single bit difference.
   constexpr unsigned char kAsciiCaseBitFlip = 'a' ^ 'A';
 
-  using vec_t = size_t;
-  // TODO(b/316380338): When FDO becomes able to vectorize these,
-  // revert this manual optimization and just leave the naive loop.
-  if (static_cast<size_t>(end - p) >= sizeof(vec_t)) {
-    p = ascii_internal::PartialAsciiStrCaseFold<ToUpper>(p, end);
-  }
-  while (p < end) {
-    unsigned char v = static_cast<unsigned char>(*p);
+  for (size_t i = 0; i < size; ++i) {
+    unsigned char v = static_cast<unsigned char>(p[i]);
     v ^= AsciiInAZRange<ToUpper>(v) ? kAsciiCaseBitFlip : 0;
-    *p = static_cast<char>(v);
-    ++p;
+    p[i] = static_cast<char>(v);
   }
 }
 
+// The minimum string size for using the long string version.
+constexpr size_t kCaseFoldThreshold = 16;
+
+// No-inline so the compiler won't merge the short and long implementations.
+template <bool ToUpper>
+ABSL_ATTRIBUTE_NOINLINE constexpr void AsciiStrCaseFoldLong(
+    absl::Nonnull<char*> p, size_t size) {
+  ABSL_ASSUME(size >= kCaseFoldThreshold);
+  AsciiStrCaseFoldImpl<ToUpper>(p, size);
+}
+
+// Splitting into short and long string cases allows vectorization decisions
+// to be made separately for each case.
+template <bool ToUpper>
+constexpr void AsciiStrCaseFold(absl::Nonnull<char*> p, size_t size) {
+  size < kCaseFoldThreshold ? AsciiStrCaseFoldImpl<ToUpper>(p, size)
+                            : AsciiStrCaseFoldLong<ToUpper>(p, size);
+}
+
 static constexpr size_t ValidateAsciiCasefold() {
   constexpr size_t num_chars = 1 + CHAR_MAX - CHAR_MIN;
   size_t incorrect_index = 0;
@@ -259,8 +222,8 @@
   for (unsigned int i = 0; i < num_chars; ++i) {
     uppered[i] = lowered[i] = static_cast<char>(i);
   }
-  AsciiStrCaseFold<false>(&lowered[0], &lowered[num_chars]);
-  AsciiStrCaseFold<true>(&uppered[0], &uppered[num_chars]);
+  AsciiStrCaseFold<false>(&lowered[0], num_chars);
+  AsciiStrCaseFold<true>(&uppered[0], num_chars);
   for (size_t i = 0; i < num_chars; ++i) {
     const char ch = static_cast<char>(i),
                ch_upper = ('a' <= ch && ch <= 'z' ? 'A' + (ch - 'a') : ch),
@@ -278,13 +241,11 @@
 }  // namespace ascii_internal
 
 void AsciiStrToLower(absl::Nonnull<std::string*> s) {
-  char* p = &(*s)[0];  // Guaranteed to be valid for empty strings
-  return ascii_internal::AsciiStrCaseFold<false>(p, p + s->size());
+  return ascii_internal::AsciiStrCaseFold<false>(&(*s)[0], s->size());
 }
 
 void AsciiStrToUpper(absl::Nonnull<std::string*> s) {
-  char* p = &(*s)[0];  // Guaranteed to be valid for empty strings
-  return ascii_internal::AsciiStrCaseFold<true>(p, p + s->size());
+  return ascii_internal::AsciiStrCaseFold<true>(&(*s)[0], s->size());
 }
 
 void RemoveExtraAsciiWhitespace(absl::Nonnull<std::string*> str) {
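
The fold above relies on upper- and lowercase ASCII letters differing in exactly one bit (`'a' ^ 'A'`). A tiny hedged sketch of that bit trick in isolation:

```
#include <cassert>

int main() {
  constexpr unsigned char kAsciiCaseBitFlip = 'a' ^ 'A';  // 0x20
  // XOR-ing an ASCII letter with the flip bit toggles its case.
  assert(static_cast<char>('G' ^ kAsciiCaseBitFlip) == 'g');
  assert(static_cast<char>('g' ^ kAsciiCaseBitFlip) == 'G');
  return 0;
}
```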
diff --git a/absl/strings/ascii_test.cc b/absl/strings/ascii_test.cc
index 117140c..8885bb1 100644
--- a/absl/strings/ascii_test.cc
+++ b/absl/strings/ascii_test.cc
@@ -190,11 +190,13 @@
   const std::string str("GHIJKL");
   const std::string str2("MNOPQR");
   const absl::string_view sp(str2);
+  const std::string long_str("ABCDEFGHIJKLMNOPQRSTUVWXYZ1!a");
   std::string mutable_str("_`?@[{AMNOPQRSTUVWXYZ");
 
   EXPECT_EQ("abcdef", absl::AsciiStrToLower(buf));
   EXPECT_EQ("ghijkl", absl::AsciiStrToLower(str));
   EXPECT_EQ("mnopqr", absl::AsciiStrToLower(sp));
+  EXPECT_EQ("abcdefghijklmnopqrstuvwxyz1!a", absl::AsciiStrToLower(long_str));
 
   absl::AsciiStrToLower(&mutable_str);
   EXPECT_EQ("_`?@[{amnopqrstuvwxyz", mutable_str);
@@ -210,10 +212,12 @@
   const std::string str("ghijkl");
   const std::string str2("_`?@[{amnopqrstuvwxyz");
   const absl::string_view sp(str2);
+  const std::string long_str("abcdefghijklmnopqrstuvwxyz1!A");
 
   EXPECT_EQ("ABCDEF", absl::AsciiStrToUpper(buf));
   EXPECT_EQ("GHIJKL", absl::AsciiStrToUpper(str));
   EXPECT_EQ("_`?@[{AMNOPQRSTUVWXYZ", absl::AsciiStrToUpper(sp));
+  EXPECT_EQ("ABCDEFGHIJKLMNOPQRSTUVWXYZ1!A", absl::AsciiStrToUpper(long_str));
 
   char mutable_buf[] = "Mutable";
   std::transform(mutable_buf, mutable_buf + strlen(mutable_buf),
diff --git a/absl/strings/cord.cc b/absl/strings/cord.cc
index f67326f..f0f4f31 100644
--- a/absl/strings/cord.cc
+++ b/absl/strings/cord.cc
@@ -75,7 +75,7 @@
 using ::absl::cord_internal::kInlinedVectorSize;
 using ::absl::cord_internal::kMaxBytesToCopy;
 
-static void DumpNode(absl::Nonnull<CordRep*> rep, bool include_data,
+static void DumpNode(absl::Nonnull<CordRep*> nonnull_rep, bool include_data,
                      absl::Nonnull<std::ostream*> os, int indent = 0);
 static bool VerifyNode(absl::Nonnull<CordRep*> root,
                        absl::Nonnull<CordRep*> start_node);
@@ -425,8 +425,8 @@
 // we keep it here to make diffs easier.
 void Cord::InlineRep::AppendArray(absl::string_view src,
                                   MethodIdentifier method) {
-  MaybeRemoveEmptyCrcNode();
   if (src.empty()) return;  // memcpy(_, nullptr, 0) is undefined.
+  MaybeRemoveEmptyCrcNode();
 
   size_t appended = 0;
   CordRep* rep = tree();
@@ -1062,6 +1062,15 @@
   }
 }
 
+void AppendCordToString(const Cord& src, absl::Nonnull<std::string*> dst) {
+  const size_t cur_dst_size = dst->size();
+  const size_t new_dst_size = cur_dst_size + src.size();
+  absl::strings_internal::STLStringResizeUninitializedAmortized(dst,
+                                                                new_dst_size);
+  char* append_ptr = &(*dst)[cur_dst_size];
+  src.CopyToArrayImpl(append_ptr);
+}
+
 void Cord::CopyToArraySlowPath(absl::Nonnull<char*> dst) const {
   assert(contents_.is_tree());
   absl::string_view fragment;
@@ -1448,14 +1457,13 @@
   }
 }
 
-static void DumpNode(absl::Nonnull<CordRep*> rep, bool include_data,
+static void DumpNode(absl::Nonnull<CordRep*> nonnull_rep, bool include_data,
                      absl::Nonnull<std::ostream*> os, int indent) {
+  CordRep* rep = nonnull_rep;
   const int kIndentStep = 1;
-  absl::InlinedVector<CordRep*, kInlinedVectorSize> stack;
-  absl::InlinedVector<int, kInlinedVectorSize> indents;
   for (;;) {
-    *os << std::setw(3) << rep->refcount.Get();
-    *os << " " << std::setw(7) << rep->length;
+    *os << std::setw(3) << (rep == nullptr ? 0 : rep->refcount.Get());
+    *os << " " << std::setw(7) << (rep == nullptr ? 0 : rep->length);
     *os << " [";
     if (include_data) *os << static_cast<void*>(rep);
     *os << "]";
@@ -1477,26 +1485,23 @@
       if (rep->IsExternal()) {
         *os << "EXTERNAL [";
         if (include_data)
-          *os << absl::CEscape(std::string(rep->external()->base, rep->length));
+          *os << absl::CEscape(
+              absl::string_view(rep->external()->base, rep->length));
         *os << "]\n";
       } else if (rep->IsFlat()) {
         *os << "FLAT cap=" << rep->flat()->Capacity() << " [";
         if (include_data)
-          *os << absl::CEscape(std::string(rep->flat()->Data(), rep->length));
+          *os << absl::CEscape(
+              absl::string_view(rep->flat()->Data(), rep->length));
         *os << "]\n";
       } else {
         CordRepBtree::Dump(rep, /*label=*/"", include_data, *os);
       }
     }
     if (leaf) {
-      if (stack.empty()) break;
-      rep = stack.back();
-      stack.pop_back();
-      indent = indents.back();
-      indents.pop_back();
+      break;
     }
   }
-  ABSL_INTERNAL_CHECK(indents.empty(), "");
 }
 
 static std::string ReportError(absl::Nonnull<CordRep*> root,
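
A hedged usage sketch of the new `AppendCordToString()` helper: it grows `*dst` by `src.size()` with an amortized resize and then copies the cord's chunks in after the existing contents. `JoinCords` is a hypothetical caller:

```
#include <string>
#include <vector>

#include "absl/strings/cord.h"

// Accumulates several cords into one std::string, appending into the same
// destination buffer instead of materializing each Cord separately.
std::string JoinCords(const std::vector<absl::Cord>& pieces) {
  std::string out;
  for (const absl::Cord& piece : pieces) {
    absl::AppendCordToString(piece, &out);  // appends, does not overwrite
  }
  return out;
}
```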
diff --git a/absl/strings/cord.h b/absl/strings/cord.h
index b3e556b..69aa8ef 100644
--- a/absl/strings/cord.h
+++ b/absl/strings/cord.h
@@ -75,6 +75,7 @@
 #include "absl/base/internal/per_thread_tls.h"
 #include "absl/base/macros.h"
 #include "absl/base/nullability.h"
+#include "absl/base/optimization.h"
 #include "absl/base/port.h"
 #include "absl/container/inlined_vector.h"
 #include "absl/crc/internal/crc_cord_state.h"
@@ -95,6 +96,7 @@
 #include "absl/strings/internal/resize_uninitialized.h"
 #include "absl/strings/internal/string_constant.h"
 #include "absl/strings/string_view.h"
+#include "absl/types/compare.h"
 #include "absl/types/optional.h"
 
 namespace absl {
@@ -104,6 +106,7 @@
 template <typename Releaser>
 Cord MakeCordFromExternal(absl::string_view, Releaser&&);
 void CopyCordToString(const Cord& src, absl::Nonnull<std::string*> dst);
+void AppendCordToString(const Cord& src, absl::Nonnull<std::string*> dst);
 
 // Cord memory accounting modes
 enum class CordMemoryAccounting {
@@ -420,6 +423,18 @@
   friend void CopyCordToString(const Cord& src,
                                absl::Nonnull<std::string*> dst);
 
+  // AppendCordToString()
+  //
+  // Appends the contents of a `src` Cord to a `*dst` string.
+  //
+  // This function optimizes the case of appending to a non-empty destination
+  // string. If `*dst` already has capacity to store the contents of the cord,
+  // this function does not invalidate pointers previously returned by
+  // `dst->data()`. If `*dst` is a new object, prefer to simply use the
+  // conversion operator to `std::string`.
+  friend void AppendCordToString(const Cord& src,
+                                 absl::Nonnull<std::string*> dst);
+
   class CharIterator;
 
   //----------------------------------------------------------------------------
@@ -757,7 +772,7 @@
 
   // Cord::Find()
   //
-  // Returns an iterator to the first occurrance of the substring `needle`.
+  // Returns an iterator to the first occurrence of the substring `needle`.
   //
   // If the substring `needle` does not occur, `Cord::char_end()` is returned.
   CharIterator Find(absl::string_view needle) const;
@@ -835,6 +850,38 @@
   friend bool operator==(const Cord& lhs, const Cord& rhs);
   friend bool operator==(const Cord& lhs, absl::string_view rhs);
 
+#ifdef __cpp_impl_three_way_comparison
+
+  // Cords support comparison with other Cords and string_views via operator<
+  // and others; here we provide a wrapper for the C++20 three-way comparison
+  // <=> operator.
+
+  static inline std::strong_ordering ConvertCompareResultToStrongOrdering(
+      int c) {
+    if (c == 0) {
+      return std::strong_ordering::equal;
+    } else if (c < 0) {
+      return std::strong_ordering::less;
+    } else {
+      return std::strong_ordering::greater;
+    }
+  }
+
+  friend inline std::strong_ordering operator<=>(const Cord& x, const Cord& y) {
+    return ConvertCompareResultToStrongOrdering(x.Compare(y));
+  }
+
+  friend inline std::strong_ordering operator<=>(const Cord& lhs,
+                                                 absl::string_view rhs) {
+    return ConvertCompareResultToStrongOrdering(lhs.Compare(rhs));
+  }
+
+  friend inline std::strong_ordering operator<=>(absl::string_view lhs,
+                                                 const Cord& rhs) {
+    return ConvertCompareResultToStrongOrdering(-rhs.Compare(lhs));
+  }
+#endif
+
   friend absl::Nullable<const CordzInfo*> GetCordzInfoForTesting(
       const Cord& cord);
 
@@ -1065,6 +1112,8 @@
       const;
 
   CharIterator FindImpl(CharIterator it, absl::string_view needle) const;
+
+  void CopyToArrayImpl(absl::Nonnull<char*> dst) const;
 };
 
 ABSL_NAMESPACE_END
@@ -1103,8 +1152,8 @@
 // Overload for function reference types that dispatches using a function
 // pointer because there are no `alignof()` or `sizeof()` a function reference.
 // NOLINTNEXTLINE - suppress clang-tidy raw pointer return.
-inline absl::Nonnull<CordRep*> NewExternalRep(absl::string_view data,
-                               void (&releaser)(absl::string_view)) {
+inline absl::Nonnull<CordRep*> NewExternalRep(
+    absl::string_view data, void (&releaser)(absl::string_view)) {
   return NewExternalRep(data, &releaser);
 }
 
@@ -1120,7 +1169,7 @@
   } else {
     using ReleaserType = absl::decay_t<Releaser>;
     cord_internal::InvokeReleaser(
-        cord_internal::Rank0{}, ReleaserType(std::forward<Releaser>(releaser)),
+        cord_internal::Rank1{}, ReleaserType(std::forward<Releaser>(releaser)),
         data);
   }
   return cord;
@@ -1170,7 +1219,8 @@
   if (rhs == this) {
     return;
   }
-  std::swap(data_, rhs->data_);
+  using std::swap;
+  swap(data_, rhs->data_);
 }
 
 inline absl::Nullable<const char*> Cord::InlineRep::data() const {
@@ -1352,7 +1402,8 @@
   return result;
 }
 
-inline absl::optional<absl::string_view> Cord::TryFlat() const {
+inline absl::optional<absl::string_view> Cord::TryFlat() const
+    ABSL_ATTRIBUTE_LIFETIME_BOUND {
   absl::cord_internal::CordRep* rep = contents_.tree();
   if (rep == nullptr) {
     return absl::string_view(contents_.data(), contents_.size());
@@ -1364,7 +1415,7 @@
   return absl::nullopt;
 }
 
-inline absl::string_view Cord::Flatten() {
+inline absl::string_view Cord::Flatten() ABSL_ATTRIBUTE_LIFETIME_BOUND {
   absl::cord_internal::CordRep* rep = contents_.tree();
   if (rep == nullptr) {
     return absl::string_view(contents_.data(), contents_.size());
@@ -1387,6 +1438,7 @@
 
 inline void Cord::Append(CordBuffer buffer) {
   if (ABSL_PREDICT_FALSE(buffer.length() == 0)) return;
+  contents_.MaybeRemoveEmptyCrcNode();
   absl::string_view short_value;
   if (CordRep* rep = buffer.ConsumeValue(short_value)) {
     contents_.AppendTree(rep, CordzUpdateTracker::kAppendCordBuffer);
@@ -1397,6 +1449,7 @@
 
 inline void Cord::Prepend(CordBuffer buffer) {
   if (ABSL_PREDICT_FALSE(buffer.length() == 0)) return;
+  contents_.MaybeRemoveEmptyCrcNode();
   absl::string_view short_value;
   if (CordRep* rep = buffer.ConsumeValue(short_value)) {
     contents_.PrependTree(rep, CordzUpdateTracker::kPrependCordBuffer);
@@ -1445,6 +1498,14 @@
   return EqualsImpl(rhs, rhs_size);
 }
 
+inline void Cord::CopyToArrayImpl(absl::Nonnull<char*> dst) const {
+  if (!contents_.is_tree()) {
+    if (!empty()) contents_.CopyToArray(dst);
+  } else {
+    CopyToArraySlowPath(dst);
+  }
+}
+
 inline void Cord::ChunkIterator::InitTree(
     absl::Nonnull<cord_internal::CordRep*> tree) {
   tree = cord_internal::SkipCrcNode(tree);
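A minimal usage sketch of the AppendCordToString() overload and the operator<=> wrapper added to cord.h above; the function below and its variable names are illustrative only, not part of this change:

#include <string>

#include "absl/strings/cord.h"

void AppendAndCompareSketch() {
  absl::Cord cord("world");
  std::string dst = "hello ";
  // Unlike CopyCordToString(), the existing contents of `dst` are kept, and
  // if `dst` already has enough capacity its buffer is not reallocated.
  absl::AppendCordToString(cord, &dst);  // dst == "hello world"

#ifdef __cpp_impl_three_way_comparison
  // Wraps Cord::Compare(); the result here is std::strong_ordering::less.
  auto order = absl::Cord("abc") <=> absl::Cord("abd");
  (void)order;
#endif
  (void)dst;
}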
diff --git a/absl/strings/cord_test.cc b/absl/strings/cord_test.cc
index f1a5f39..eaf6d71 100644
--- a/absl/strings/cord_test.cc
+++ b/absl/strings/cord_test.cc
@@ -38,10 +38,12 @@
 #include "absl/base/config.h"
 #include "absl/base/internal/endian.h"
 #include "absl/base/macros.h"
+#include "absl/base/no_destructor.h"
 #include "absl/base/options.h"
 #include "absl/container/fixed_array.h"
 #include "absl/functional/function_ref.h"
 #include "absl/hash/hash.h"
+#include "absl/hash/hash_testing.h"
 #include "absl/log/check.h"
 #include "absl/log/log.h"
 #include "absl/random/random.h"
@@ -58,6 +60,7 @@
 #include "absl/strings/str_cat.h"
 #include "absl/strings/str_format.h"
 #include "absl/strings/string_view.h"
+#include "absl/types/compare.h"
 #include "absl/types/optional.h"
 
 // convenience local constants
@@ -241,12 +244,14 @@
 ABSL_NAMESPACE_END
 }  // namespace absl
 
-// The CordTest fixture runs all tests with and without Cord Btree enabled,
-// and with our without expected CRCs being set on the subject Cords.
-class CordTest : public testing::TestWithParam<int> {
+
+
+// The CordTest fixture runs all tests with and without expected CRCs being set
+// on the subject Cords.
+class CordTest : public testing::TestWithParam<bool /*useCrc*/> {
  public:
-  // Returns true if test is running with btree enabled.
-  bool UseCrc() const { return GetParam() == 2 || GetParam() == 3; }
+  // Returns true if test is running with Crc enabled.
+  bool UseCrc() const { return GetParam(); }
   void MaybeHarden(absl::Cord& c) {
     if (UseCrc()) {
       c.SetExpectedChecksum(1);
@@ -258,20 +263,16 @@
   }
 
   // Returns human readable string representation of the test parameter.
-  static std::string ToString(testing::TestParamInfo<int> param) {
-    switch (param.param) {
-      case 0:
-        return "Btree";
-      case 1:
-        return "BtreeHardened";
-      default:
-        assert(false);
-        return "???";
+  static std::string ToString(testing::TestParamInfo<bool> useCrc) {
+    if (useCrc.param) {
+      return "BtreeHardened";
+    } else {
+      return "Btree";
     }
   }
 };
 
-INSTANTIATE_TEST_SUITE_P(WithParam, CordTest, testing::Values(0, 1),
+INSTANTIATE_TEST_SUITE_P(WithParam, CordTest, testing::Bool(),
                          CordTest::ToString);
 
 TEST(CordRepFlat, AllFlatCapacities) {
@@ -702,6 +703,38 @@
                                 "copying ", "to ", "a ", "string."})));
 }
 
+static void VerifyAppendCordToString(const absl::Cord& cord) {
+  std::string initially_empty;
+  absl::AppendCordToString(cord, &initially_empty);
+  EXPECT_EQ(initially_empty, cord);
+
+  const absl::string_view kInitialContents = "initial contents.";
+  std::string expected_after_append =
+      absl::StrCat(kInitialContents, std::string(cord));
+
+  std::string no_reserve(kInitialContents);
+  absl::AppendCordToString(cord, &no_reserve);
+  EXPECT_EQ(no_reserve, expected_after_append);
+
+  std::string has_reserved_capacity(kInitialContents);
+  has_reserved_capacity.reserve(has_reserved_capacity.size() + cord.size());
+  const char* address_before_copy = has_reserved_capacity.data();
+  absl::AppendCordToString(cord, &has_reserved_capacity);
+  EXPECT_EQ(has_reserved_capacity, expected_after_append);
+  EXPECT_EQ(has_reserved_capacity.data(), address_before_copy)
+      << "AppendCordToString allocated new string storage; "
+         "has_reserved_capacity = \""
+      << has_reserved_capacity << "\"";
+}
+
+TEST_P(CordTest, AppendToString) {
+  VerifyAppendCordToString(absl::Cord());  // empty cords cannot carry CRCs
+  VerifyAppendCordToString(MaybeHardened(absl::Cord("small cord")));
+  VerifyAppendCordToString(MaybeHardened(
+      absl::MakeFragmentedCord({"fragmented ", "cord ", "to ", "test ",
+                                "appending ", "to ", "a ", "string."})));
+}
+
 TEST_P(CordTest, AppendEmptyBuffer) {
   absl::Cord cord;
   cord.Append(absl::CordBuffer());
@@ -1512,12 +1545,11 @@
 // comparison methods from basic_string.
 static void TestCompare(const absl::Cord& c, const absl::Cord& d,
                         RandomEngine* rng) {
-  typedef std::basic_string<uint8_t> ustring;
-  ustring cs(reinterpret_cast<const uint8_t*>(std::string(c).data()), c.size());
-  ustring ds(reinterpret_cast<const uint8_t*>(std::string(d).data()), d.size());
-  // ustring comparison is ideal because we expect Cord comparisons to be
-  // based on unsigned byte comparisons regardless of whether char is signed.
-  int expected = sign(cs.compare(ds));
+  // char_traits<char>::lt is guaranteed to do an unsigned comparison:
+  // https://en.cppreference.com/w/cpp/string/char_traits/cmp. We also expect
+  // Cord comparisons to be based on unsigned byte comparisons regardless of
+  // whether char is signed.
+  int expected = sign(std::string(c).compare(std::string(d)));
   EXPECT_EQ(expected, sign(c.Compare(d))) << c << ", " << d;
 }
 
@@ -2013,6 +2045,26 @@
                 rep2_size);
 }
 
+TEST(CordTest, TestHashFragmentation) {
+  // Make sure we hit these boundary cases precisely.
+  EXPECT_EQ(1024, absl::hash_internal::PiecewiseChunkSize());
+  EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({
+      absl::Cord(),
+      absl::MakeFragmentedCord({std::string(600, 'a'), std::string(600, 'a')}),
+      absl::MakeFragmentedCord({std::string(1200, 'a')}),
+      absl::MakeFragmentedCord({std::string(900, 'b'), std::string(900, 'b')}),
+      absl::MakeFragmentedCord({std::string(1800, 'b')}),
+      absl::MakeFragmentedCord(
+          {std::string(2000, 'c'), std::string(2000, 'c')}),
+      absl::MakeFragmentedCord({std::string(4000, 'c')}),
+      absl::MakeFragmentedCord({std::string(1024, 'd')}),
+      absl::MakeFragmentedCord({std::string(1023, 'd'), "d"}),
+      absl::MakeFragmentedCord({std::string(1025, 'e')}),
+      absl::MakeFragmentedCord({std::string(1024, 'e'), "e"}),
+      absl::MakeFragmentedCord({std::string(1023, 'e'), "e", "e"}),
+  }));
+}
+
 // Regtest for a change that had to be rolled back because it expanded out
 // of the InlineRep too soon, which was observable through MemoryUsage().
 TEST_P(CordTest, CordMemoryUsageInlineRep) {
@@ -2744,34 +2796,15 @@
   absl::string_view expected_;
 };
 
-// Deliberately prevents the destructor for an absl::Cord from running. The cord
-// is accessible via the cord member during the lifetime of the CordLeaker.
-// After the CordLeaker is destroyed, pointers to the cord will remain valid
-// until the CordLeaker's memory is deallocated.
-struct CordLeaker {
-  union {
-    absl::Cord cord;
-  };
-
-  template <typename Str>
-  constexpr explicit CordLeaker(const Str& str) : cord(str) {}
-
-  ~CordLeaker() {
-    // Don't do anything, including running cord's destructor. (cord's
-    // destructor won't run automatically because cord is hidden inside a
-    // union.)
-  }
-};
-
 template <typename Str>
-void TestConstinitConstructor(Str) {
+void TestAfterExit(Str) {
   const auto expected = Str::value;
   // Defined before `cord` to be destroyed after it.
   static AfterExitCordTester exit_tester;  // NOLINT
-  ABSL_CONST_INIT static CordLeaker cord_leaker(Str{});  // NOLINT
+  static absl::NoDestructor<absl::Cord> cord_leaker(Str{});
   // cord_leaker is static, so this reference will remain valid through the end
   // of program execution.
-  static absl::Cord& cord = cord_leaker.cord;
+  static absl::Cord& cord = *cord_leaker;
   static bool init_exit_tester = exit_tester.Set(&cord, expected);
   (void)init_exit_tester;
 
@@ -2823,11 +2856,9 @@
 };
 
 
-TEST_P(CordTest, ConstinitConstructor) {
-  TestConstinitConstructor(
-      absl::strings_internal::MakeStringConstant(ShortView{}));
-  TestConstinitConstructor(
-      absl::strings_internal::MakeStringConstant(LongView{}));
+TEST_P(CordTest, AfterExit) {
+  TestAfterExit(absl::strings_internal::MakeStringConstant(ShortView{}));
+  TestAfterExit(absl::strings_internal::MakeStringConstant(LongView{}));
 }
 
 namespace {
@@ -3253,6 +3284,31 @@
   EXPECT_NE(cord.EstimatedMemoryUsage(), 0);
 }
 
+TEST(CordThreeWayComparisonTest, CompareCords) {
+#ifndef __cpp_impl_three_way_comparison
+  GTEST_SKIP() << "C++20 three-way <=> comparison not supported";
+#else
+  EXPECT_EQ(absl::Cord("a") <=> absl::Cord("a"), std::strong_ordering::equal);
+  EXPECT_EQ(absl::Cord("aaaa") <=> absl::Cord("aaab"),
+            std::strong_ordering::less);
+  EXPECT_EQ(absl::Cord("baaa") <=> absl::Cord("a"),
+            std::strong_ordering::greater);
+#endif
+}
+
+TEST(CordThreeWayComparisonTest, CompareCordsAndStringViews) {
+#ifndef __cpp_impl_three_way_comparison
+  GTEST_SKIP() << "C++20 three-way <=> comparison not supported";
+#else
+  EXPECT_EQ(absl::string_view("a") <=> absl::Cord("a"),
+            std::strong_ordering::equal);
+  EXPECT_EQ(absl::Cord("a") <=> absl::string_view("b"),
+            std::strong_ordering::less);
+  EXPECT_EQ(absl::string_view("b") <=> absl::Cord("a"),
+            std::strong_ordering::greater);
+#endif
+}
+
 #if defined(GTEST_HAS_DEATH_TEST) && defined(ABSL_INTERNAL_CORD_HAVE_SANITIZER)
 
 // Returns an expected poison / uninitialized death message expression.
diff --git a/absl/strings/escaping.cc b/absl/strings/escaping.cc
index 1c0eac4..4ffef94 100644
--- a/absl/strings/escaping.cc
+++ b/absl/strings/escaping.cc
@@ -21,10 +21,12 @@
 #include <cstring>
 #include <limits>
 #include <string>
+#include <utility>
 
 #include "absl/base/config.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/base/internal/unaligned_access.h"
+#include "absl/base/nullability.h"
 #include "absl/strings/ascii.h"
 #include "absl/strings/charset.h"
 #include "absl/strings/internal/escaping.h"
@@ -54,7 +56,8 @@
   return x & 0xf;
 }
 
-inline bool IsSurrogate(char32_t c, absl::string_view src, std::string* error) {
+inline bool IsSurrogate(char32_t c, absl::string_view src,
+                        absl::Nullable<std::string*> error) {
   if (c >= 0xD800 && c <= 0xDFFF) {
     if (error) {
       *error = absl::StrCat("invalid surrogate character (0xD800-DFFF): \\",
@@ -83,7 +86,9 @@
 //     UnescapeCEscapeSequences().
 // ----------------------------------------------------------------------
 bool CUnescapeInternal(absl::string_view source, bool leave_nulls_escaped,
-                       char* dest, ptrdiff_t* dest_len, std::string* error) {
+                       absl::Nonnull<char*> dest,
+                       absl::Nonnull<ptrdiff_t*> dest_len,
+                       absl::Nullable<std::string*> error) {
   char* d = dest;
   const char* p = source.data();
   const char* end = p + source.size();
@@ -290,7 +295,8 @@
 //    may be the same.
 // ----------------------------------------------------------------------
 bool CUnescapeInternal(absl::string_view source, bool leave_nulls_escaped,
-                       std::string* dest, std::string* error) {
+                       absl::Nonnull<std::string*> dest,
+                       absl::Nullable<std::string*> error) {
   strings_internal::STLStringResizeUninitialized(dest, source.size());
 
   ptrdiff_t dest_size;
@@ -362,7 +368,7 @@
 }
 
 /* clang-format off */
-constexpr unsigned char c_escaped_len[256] = {
+constexpr unsigned char kCEscapedLen[256] = {
     4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 4, 4, 2, 4, 4,  // \t, \n, \r
     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
     1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1,  // ", '
@@ -387,12 +393,28 @@
 // that UTF-8 bytes are not handled specially.
 inline size_t CEscapedLength(absl::string_view src) {
   size_t escaped_len = 0;
-  for (char c : src)
-    escaped_len += c_escaped_len[static_cast<unsigned char>(c)];
+  // The maximum value of kCEscapedLen[x] is 4, so we can escape any string of
+  // length size_t_max/4 without checking for overflow.
+  size_t unchecked_limit =
+      std::min<size_t>(src.size(), std::numeric_limits<size_t>::max() / 4);
+  size_t i = 0;
+  while (i < unchecked_limit) {
+    // Common case: No need to check for overflow.
+    escaped_len += kCEscapedLen[static_cast<unsigned char>(src[i++])];
+  }
+  while (i < src.size()) {
+    // Beyond unchecked_limit we need to check for overflow before adding.
+    size_t char_len = kCEscapedLen[static_cast<unsigned char>(src[i++])];
+    ABSL_INTERNAL_CHECK(
+        escaped_len <= std::numeric_limits<size_t>::max() - char_len,
+        "escaped_len overflow");
+    escaped_len += char_len;
+  }
   return escaped_len;
 }
 
-void CEscapeAndAppendInternal(absl::string_view src, std::string* dest) {
+void CEscapeAndAppendInternal(absl::string_view src,
+                              absl::Nonnull<std::string*> dest) {
   size_t escaped_len = CEscapedLength(src);
   if (escaped_len == src.size()) {
     dest->append(src.data(), src.size());
@@ -400,12 +422,15 @@
   }
 
   size_t cur_dest_len = dest->size();
+  ABSL_INTERNAL_CHECK(
+      cur_dest_len <= std::numeric_limits<size_t>::max() - escaped_len,
+      "std::string size overflow");
   strings_internal::STLStringResizeUninitialized(dest,
                                                  cur_dest_len + escaped_len);
   char* append_ptr = &(*dest)[cur_dest_len];
 
   for (char c : src) {
-    size_t char_len = c_escaped_len[static_cast<unsigned char>(c)];
+    size_t char_len = kCEscapedLen[static_cast<unsigned char>(c)];
     if (char_len == 1) {
       *append_ptr++ = c;
     } else if (char_len == 2) {
@@ -446,9 +471,10 @@
 
 // Reverses the mapping in Base64EscapeInternal; see that method's
 // documentation for details of the mapping.
-bool Base64UnescapeInternal(const char* src_param, size_t szsrc, char* dest,
-                            size_t szdest, const signed char* unbase64,
-                            size_t* len) {
+bool Base64UnescapeInternal(absl::Nullable<const char*> src_param, size_t szsrc,
+                            absl::Nullable<char*> dest, size_t szdest,
+                            absl::Nonnull<const signed char*> unbase64,
+                            absl::Nonnull<size_t*> len) {
   static const char kPad64Equals = '=';
   static const char kPad64Dot = '.';
 
@@ -784,8 +810,9 @@
 /* clang-format on */
 
 template <typename String>
-bool Base64UnescapeInternal(const char* src, size_t slen, String* dest,
-                            const signed char* unbase64) {
+bool Base64UnescapeInternal(absl::Nullable<const char*> src, size_t slen,
+                            absl::Nonnull<String*> dest,
+                            absl::Nonnull<const signed char*> unbase64) {
   // Determine the size of the output string.  Base64 encodes every 3 bytes into
   // 4 characters.  Any leftover chars are added directly for good measure.
   const size_t dest_len = 3 * (slen / 4) + (slen % 4);
@@ -829,13 +856,32 @@
     0,  0,  0,  0,  0,  0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 };
 
+constexpr signed char kHexValueStrict[256] = {
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, -1, -1, -1, -1, -1, -1,  // '0'..'9'
+    -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1,  // 'A'..'F'
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+    -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1,  // 'a'..'f'
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+};
 /* clang-format on */
 
 // This is a templated function so that T can be either a char*
 // or a string.  This works because we use the [] operator to access
 // individual characters at a time.
 template <typename T>
-void HexStringToBytesInternal(const char* from, T to, size_t num) {
+void HexStringToBytesInternal(absl::Nullable<const char*> from, T to,
+                              size_t num) {
   for (size_t i = 0; i < num; i++) {
     to[i] = static_cast<char>(kHexValueLenient[from[i * 2] & 0xFF] << 4) +
             (kHexValueLenient[from[i * 2 + 1] & 0xFF]);
@@ -845,7 +891,8 @@
 // This is a templated function so that T can be either a char* or a
 // std::string.
 template <typename T>
-void BytesToHexStringInternal(const unsigned char* src, T dest, size_t num) {
+void BytesToHexStringInternal(absl::Nullable<const unsigned char*> src, T dest,
+                              size_t num) {
   auto dest_ptr = &dest[0];
   for (auto src_ptr = src; src_ptr != (src + num); ++src_ptr, dest_ptr += 2) {
     const char* hex_p = &numbers_internal::kHexTable[*src_ptr * 2];
@@ -860,8 +907,8 @@
 //
 // See CUnescapeInternal() for implementation details.
 // ----------------------------------------------------------------------
-bool CUnescape(absl::string_view source, std::string* dest,
-               std::string* error) {
+bool CUnescape(absl::string_view source, absl::Nonnull<std::string*> dest,
+               absl::Nullable<std::string*> error) {
   return CUnescapeInternal(source, kUnescapeNulls, dest, error);
 }
 
@@ -883,21 +930,23 @@
   return CEscapeInternal(src, true, true);
 }
 
-bool Base64Unescape(absl::string_view src, std::string* dest) {
+bool Base64Unescape(absl::string_view src, absl::Nonnull<std::string*> dest) {
   return Base64UnescapeInternal(src.data(), src.size(), dest, kUnBase64);
 }
 
-bool WebSafeBase64Unescape(absl::string_view src, std::string* dest) {
+bool WebSafeBase64Unescape(absl::string_view src,
+                           absl::Nonnull<std::string*> dest) {
   return Base64UnescapeInternal(src.data(), src.size(), dest, kUnWebSafeBase64);
 }
 
-void Base64Escape(absl::string_view src, std::string* dest) {
+void Base64Escape(absl::string_view src, absl::Nonnull<std::string*> dest) {
   strings_internal::Base64EscapeInternal(
       reinterpret_cast<const unsigned char*>(src.data()), src.size(), dest,
       true, strings_internal::kBase64Chars);
 }
 
-void WebSafeBase64Escape(absl::string_view src, std::string* dest) {
+void WebSafeBase64Escape(absl::string_view src,
+                         absl::Nonnull<std::string*> dest) {
   strings_internal::Base64EscapeInternal(
       reinterpret_cast<const unsigned char*>(src.data()), src.size(), dest,
       false, strings_internal::kWebSafeBase64Chars);
@@ -919,6 +968,32 @@
   return dest;
 }
 
+bool HexStringToBytes(absl::string_view hex,
+                      absl::Nonnull<std::string*> bytes) {
+  std::string output;
+
+  size_t num_bytes = hex.size() / 2;
+  if (hex.size() != num_bytes * 2) {
+    return false;
+  }
+
+  absl::strings_internal::STLStringResizeUninitialized(&output, num_bytes);
+  auto hex_p = hex.cbegin();
+  for (std::string::iterator bin_p = output.begin(); bin_p != output.end();
+       ++bin_p) {
+    int h1 = absl::kHexValueStrict[static_cast<size_t>(*hex_p++)];
+    int h2 = absl::kHexValueStrict[static_cast<size_t>(*hex_p++)];
+    if (h1 == -1 || h2 == -1) {
+      output.resize(static_cast<size_t>(bin_p - output.begin()));
+      return false;
+    }
+    *bin_p = static_cast<char>((h1 << 4) + h2);
+  }
+
+  *bytes = std::move(output);
+  return true;
+}
+
 std::string HexStringToBytes(absl::string_view from) {
   std::string result;
   const auto num = from.size() / 2;
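The CEscapedLength() rewrite above depends on each input byte expanding to at most 4 output bytes, so only the tail of very large inputs needs a per-character overflow check. A standalone sketch of that accumulation pattern, assuming a 256-entry length table whose entries never exceed 4 (names are generic, not Abseil's internals):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <limits>
#include <string_view>

size_t GuardedEscapedLength(std::string_view src,
                            const unsigned char (&per_char_len)[256]) {
  constexpr size_t kMaxPerChar = 4;
  size_t total = 0;
  const size_t unchecked_limit =
      std::min(src.size(), std::numeric_limits<size_t>::max() / kMaxPerChar);
  size_t i = 0;
  for (; i < unchecked_limit; ++i) {
    // Common case: at most unchecked_limit * kMaxPerChar <= SIZE_MAX can be
    // accumulated here, so no overflow check is needed.
    total += per_char_len[static_cast<unsigned char>(src[i])];
  }
  for (; i < src.size(); ++i) {
    // Beyond the unchecked limit, verify before adding.
    const size_t char_len = per_char_len[static_cast<unsigned char>(src[i])];
    assert(total <= std::numeric_limits<size_t>::max() - char_len);
    total += char_len;
  }
  return total;
}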
diff --git a/absl/strings/escaping.h b/absl/strings/escaping.h
index bf2a589..3f34fbf 100644
--- a/absl/strings/escaping.h
+++ b/absl/strings/escaping.h
@@ -27,7 +27,9 @@
 #include <string>
 #include <vector>
 
+#include "absl/base/attributes.h"
 #include "absl/base/macros.h"
+#include "absl/base/nullability.h"
 #include "absl/strings/ascii.h"
 #include "absl/strings/str_join.h"
 #include "absl/strings/string_view.h"
@@ -65,14 +67,16 @@
 //
 //   std::string s = "foo\\rbar\\nbaz\\t";
 //   std::string unescaped_s;
-//   if (!absl::CUnescape(s, &unescaped_s) {
+//   if (!absl::CUnescape(s, &unescaped_s)) {
 //     ...
 //   }
 //   EXPECT_EQ(unescaped_s, "foo\rbar\nbaz\t");
-bool CUnescape(absl::string_view source, std::string* dest, std::string* error);
+bool CUnescape(absl::string_view source, absl::Nonnull<std::string*> dest,
+               absl::Nullable<std::string*> error);
 
 // Overload of `CUnescape()` with no error reporting.
-inline bool CUnescape(absl::string_view source, std::string* dest) {
+inline bool CUnescape(absl::string_view source,
+                      absl::Nonnull<std::string*> dest) {
   return CUnescape(source, dest, nullptr);
 }
 
@@ -122,7 +126,7 @@
 // Encodes a `src` string into a base64-encoded 'dest' string with padding
 // characters. This function conforms with RFC 4648 section 4 (base64) and RFC
 // 2045.
-void Base64Escape(absl::string_view src, std::string* dest);
+void Base64Escape(absl::string_view src, absl::Nonnull<std::string*> dest);
 std::string Base64Escape(absl::string_view src);
 
 // WebSafeBase64Escape()
@@ -130,7 +134,8 @@
 // Encodes a `src` string into a base64 string, like Base64Escape() does, but
 // outputs '-' instead of '+' and '_' instead of '/', and does not pad 'dest'.
 // This function conforms with RFC 4648 section 5 (base64url).
-void WebSafeBase64Escape(absl::string_view src, std::string* dest);
+void WebSafeBase64Escape(absl::string_view src,
+                         absl::Nonnull<std::string*> dest);
 std::string WebSafeBase64Escape(absl::string_view src);
 
 // Base64Unescape()
@@ -140,7 +145,7 @@
 // `src` contains invalid characters, `dest` is cleared and returns `false`.
 // If padding is included (note that `Base64Escape()` does produce it), it must
 // be correct. In the padding, '=' and '.' are treated identically.
-bool Base64Unescape(absl::string_view src, std::string* dest);
+bool Base64Unescape(absl::string_view src, absl::Nonnull<std::string*> dest);
 
 // WebSafeBase64Unescape()
 //
@@ -149,12 +154,24 @@
 // invalid characters, `dest` is cleared and returns `false`. If padding is
 // included (note that `WebSafeBase64Escape()` does not produce it), it must be
 // correct. In the padding, '=' and '.' are treated identically.
-bool WebSafeBase64Unescape(absl::string_view src, std::string* dest);
+bool WebSafeBase64Unescape(absl::string_view src,
+                           absl::Nonnull<std::string*> dest);
+
+// HexStringToBytes()
+//
+// Converts the hexadecimal encoded data in `hex` into raw bytes in the `bytes`
+// output string.  If `hex` does not consist of valid hexadecimal data, this
+// function returns false and leaves `bytes` in an unspecified state. Returns
+// true on success.
+ABSL_MUST_USE_RESULT bool HexStringToBytes(absl::string_view hex,
+                                           absl::Nonnull<std::string*> bytes);
 
 // HexStringToBytes()
 //
 // Converts an ASCII hex string into bytes, returning binary data of length
-// `from.size()/2`.
+// `from.size()/2`. The input must be valid hexadecimal data, otherwise the
+// return value is unspecified.
+ABSL_DEPRECATED("Use the HexStringToBytes() that returns a bool")
 std::string HexStringToBytes(absl::string_view from);
 
 // BytesToHexString()
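A minimal sketch of calling the new bool-returning HexStringToBytes() declared above; DecodeHexOrEmpty() is a hypothetical helper used only to illustrate the error signal that the deprecated overload lacks:

#include <string>

#include "absl/strings/escaping.h"
#include "absl/strings/string_view.h"

std::string DecodeHexOrEmpty(absl::string_view hex) {
  std::string bytes;
  if (!absl::HexStringToBytes(hex, &bytes)) {
    // Odd length or non-hex characters: `bytes` is left in an unspecified
    // state, so it is not returned.
    return std::string();
  }
  return bytes;
}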
diff --git a/absl/strings/escaping_test.cc b/absl/strings/escaping_test.cc
index ca1ee45..25cb685 100644
--- a/absl/strings/escaping_test.cc
+++ b/absl/strings/escaping_test.cc
@@ -689,6 +689,42 @@
   EXPECT_EQ(huge, unescaped);
 }
 
+TEST(Escaping, HexStringToBytesBackToHex) {
+  std::string bytes, hex;
+
+  constexpr absl::string_view kTestHexLower = "1c2f0032f40123456789abcdef";
+  constexpr absl::string_view kTestHexUpper = "1C2F0032F40123456789ABCDEF";
+  constexpr absl::string_view kTestBytes = absl::string_view(
+      "\x1c\x2f\x00\x32\xf4\x01\x23\x45\x67\x89\xab\xcd\xef", 13);
+
+  EXPECT_TRUE(absl::HexStringToBytes(kTestHexLower, &bytes));
+  EXPECT_EQ(bytes, kTestBytes);
+
+  EXPECT_TRUE(absl::HexStringToBytes(kTestHexUpper, &bytes));
+  EXPECT_EQ(bytes, kTestBytes);
+
+  hex = absl::BytesToHexString(kTestBytes);
+  EXPECT_EQ(hex, kTestHexLower);
+
+  // Same buffer.
+  // We do not care if this works since we do not promise it in the contract.
+  // The purpose of this test is to see if the program will crash or if
+  // sanitizers will catch anything.
+  bytes = std::string(kTestHexUpper);
+  (void)absl::HexStringToBytes(bytes, &bytes);
+
+  // Length not a multiple of two.
+  EXPECT_FALSE(absl::HexStringToBytes("1c2f003", &bytes));
+
+  // Not hex.
+  EXPECT_FALSE(absl::HexStringToBytes("1c2f00ft", &bytes));
+
+  // Empty input.
+  bytes = "abc";
+  EXPECT_TRUE(absl::HexStringToBytes("", &bytes));
+  EXPECT_EQ("", bytes);  // Results in empty output.
+}
+
 TEST(HexAndBack, HexStringToBytes_and_BytesToHexString) {
   std::string hex_mixed = "0123456789abcdefABCDEF";
   std::string bytes_expected = "\x01\x23\x45\x67\x89\xab\xcd\xef\xAB\xCD\xEF";
diff --git a/absl/strings/has_absl_stringify.h b/absl/strings/has_absl_stringify.h
index 274a786..9af0191 100644
--- a/absl/strings/has_absl_stringify.h
+++ b/absl/strings/has_absl_stringify.h
@@ -18,6 +18,7 @@
 #include <type_traits>
 #include <utility>
 
+#include "absl/base/config.h"
 #include "absl/strings/string_view.h"
 
 namespace absl {
diff --git a/absl/strings/internal/charconv_bigint.h b/absl/strings/internal/charconv_bigint.h
index 5c0c375..cb29767 100644
--- a/absl/strings/internal/charconv_bigint.h
+++ b/absl/strings/internal/charconv_bigint.h
@@ -109,7 +109,17 @@
       size_ = (std::min)(size_ + word_shift, max_words);
       count %= 32;
       if (count == 0) {
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=warray-bounds
+// shows a lot of bogus -Warray-bounds warnings under GCC.
+// This is not the only one in Abseil.
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(14, 0)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Warray-bounds"
+#endif
         std::copy_backward(words_, words_ + size_ - word_shift, words_ + size_);
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(14, 0)
+#pragma GCC diagnostic pop
+#endif
       } else {
         for (int i = (std::min)(size_, max_words - 1); i > word_shift; --i) {
           words_[i] = (words_[i - word_shift] << count) |
diff --git a/absl/strings/internal/cord_internal.h b/absl/strings/internal/cord_internal.h
index 8744540..f0060f1 100644
--- a/absl/strings/internal/cord_internal.h
+++ b/absl/strings/internal/cord_internal.h
@@ -85,7 +85,7 @@
 };
 
 // Emits a fatal error "Unexpected node type: xyz" and aborts the program.
-ABSL_ATTRIBUTE_NORETURN void LogFatalNodeType(CordRep* rep);
+[[noreturn]] void LogFatalNodeType(CordRep* rep);
 
 // Fast implementation of memmove for up to 15 bytes. This implementation is
 // safe for overlapping regions. If nullify_tail is true, the destination is
@@ -259,7 +259,7 @@
   // on the specific layout of these fields. Notably: the non-trivial field
   // `refcount` being preceded by `length`, and being tailed by POD data
   // members only.
-  // # LINT.IfChange
+  // LINT.IfChange
   size_t length;
   RefcountAndFlags refcount;
   // If tag < FLAT, it represents CordRepKind and indicates the type of node.
@@ -275,7 +275,7 @@
   // allocate room for these in the derived class, as not all compilers reuse
   // padding space from the base class (clang and gcc do, MSVC does not, etc)
   uint8_t storage[3];
-  // # LINT.ThenChange(cord_rep_btree.h:copy_raw)
+  // LINT.ThenChange(cord_rep_btree.h:copy_raw)
 
   // Returns true if this instance's tag matches the requested type.
   constexpr bool IsSubstring() const { return tag == SUBSTRING; }
@@ -352,18 +352,19 @@
   static void Delete(CordRep* rep);
 };
 
-struct Rank1 {};
-struct Rank0 : Rank1 {};
+// Use go/ranked-overloads for dispatching.
+struct Rank0 {};
+struct Rank1 : Rank0 {};
 
 template <typename Releaser, typename = ::absl::base_internal::invoke_result_t<
                                  Releaser, absl::string_view>>
-void InvokeReleaser(Rank0, Releaser&& releaser, absl::string_view data) {
+void InvokeReleaser(Rank1, Releaser&& releaser, absl::string_view data) {
   ::absl::base_internal::invoke(std::forward<Releaser>(releaser), data);
 }
 
 template <typename Releaser,
           typename = ::absl::base_internal::invoke_result_t<Releaser>>
-void InvokeReleaser(Rank1, Releaser&& releaser, absl::string_view) {
+void InvokeReleaser(Rank0, Releaser&& releaser, absl::string_view) {
   ::absl::base_internal::invoke(std::forward<Releaser>(releaser));
 }
 
@@ -381,7 +382,7 @@
   }
 
   ~CordRepExternalImpl() {
-    InvokeReleaser(Rank0{}, std::move(this->template get<0>()),
+    InvokeReleaser(Rank1{}, std::move(this->template get<0>()),
                    absl::string_view(base, length));
   }
 
@@ -398,7 +399,6 @@
   assert(pos < child->length);
   assert(n <= child->length - pos);
 
-  // TODO(b/217376272): Harden internal logic.
   // Move to strategical places inside the Cord logic and make this an assert.
   if (ABSL_PREDICT_FALSE(!(child->IsExternal() || child->IsFlat()))) {
     LogFatalNodeType(child);
@@ -520,6 +520,7 @@
 
   constexpr InlineData(const InlineData& rhs) noexcept;
   InlineData& operator=(const InlineData& rhs) noexcept;
+  friend void swap(InlineData& lhs, InlineData& rhs) noexcept;
 
   friend bool operator==(const InlineData& lhs, const InlineData& rhs) {
 #ifdef ABSL_INTERNAL_CORD_HAVE_SANITIZER
@@ -770,6 +771,12 @@
       char data[kMaxInline + 1];
       AsTree as_tree;
     };
+
+    // TODO(b/145829486): see swap(InlineData, InlineData) for more info.
+    inline void SwapValue(Rep rhs, Rep& refrhs) {
+      memcpy(&refrhs, this, sizeof(*this));
+      memcpy(this, &rhs, sizeof(*this));
+    }
   };
 
   // Private implementation of `Compare()`
@@ -884,6 +891,19 @@
   }
 }
 
+inline void swap(InlineData& lhs, InlineData& rhs) noexcept {
+  lhs.unpoison();
+  rhs.unpoison();
+  // TODO(b/145829486): `std::swap(lhs.rep_, rhs.rep_)` results in bad codegen
+  // on clang, spilling the temporary swap value on the stack. Since `Rep` is
+  // trivial, we can make clang DTRT by calling a hand-rolled `SwapValue` where
+  // we pass `rhs` both by value (register allocated) and by reference. The IR
+  // then folds and inlines correctly into an optimized swap without spill.
+  lhs.rep_.SwapValue(rhs.rep_, rhs.rep_);
+  rhs.poison();
+  lhs.poison();
+}
+
 }  // namespace cord_internal
 
 ABSL_NAMESPACE_END
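The Rank0/Rank1 rename above follows the ranked-overload dispatch idiom: the call site passes the most-derived tag, the overload with the stricter SFINAE constraint wins when it is viable, and the base-tag overload is reached through the derived-to-base conversion otherwise. A self-contained sketch of the pattern, with illustrative names rather than Abseil's:

#include <iostream>
#include <string_view>
#include <utility>

struct Rank0 {};
struct Rank1 : Rank0 {};

// Preferred when `f` accepts a string_view.
template <typename F,
          typename = decltype(std::declval<F>()(std::string_view{}))>
void CallReleaser(Rank1, F&& f, std::string_view data) {
  std::forward<F>(f)(data);
}

// Fallback when `f` takes no arguments; selected via the Rank1 -> Rank0
// conversion after the first overload fails substitution.
template <typename F, typename = decltype(std::declval<F>()())>
void CallReleaser(Rank0, F&& f, std::string_view) {
  std::forward<F>(f)();
}

int main() {
  CallReleaser(Rank1{}, [](std::string_view s) { std::cout << s << "\n"; },
               "uses data");
  CallReleaser(Rank1{}, [] { std::cout << "ignores data\n"; }, "unused");
}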
diff --git a/absl/strings/internal/cord_rep_btree.h b/absl/strings/internal/cord_rep_btree.h
index be94b62..ab259af 100644
--- a/absl/strings/internal/cord_rep_btree.h
+++ b/absl/strings/internal/cord_rep_btree.h
@@ -684,14 +684,14 @@
   // except `refcount` is trivially copyable, and the compiler does not
   // efficiently coalesce member-wise copy of these members.
   // See https://gcc.godbolt.org/z/qY8zsca6z
-  // # LINT.IfChange(copy_raw)
+  // LINT.IfChange(copy_raw)
   tree->length = new_length;
   uint8_t* dst = &tree->tag;
   const uint8_t* src = &tag;
   const ptrdiff_t offset = src - reinterpret_cast<const uint8_t*>(this);
   memcpy(dst, src, sizeof(CordRepBtree) - static_cast<size_t>(offset));
   return tree;
-  // # LINT.ThenChange()
+  // LINT.ThenChange()
 }
 
 inline CordRepBtree* CordRepBtree::Copy() const {
diff --git a/absl/strings/internal/cordz_functions.cc b/absl/strings/internal/cordz_functions.cc
index 20d314f..6033d04 100644
--- a/absl/strings/internal/cordz_functions.cc
+++ b/absl/strings/internal/cordz_functions.cc
@@ -40,13 +40,15 @@
 // Special negative 'not initialized' per thread value for cordz_next_sample.
 static constexpr int64_t kInitCordzNextSample = -1;
 
-ABSL_CONST_INIT thread_local int64_t cordz_next_sample = kInitCordzNextSample;
+ABSL_CONST_INIT thread_local SamplingState cordz_next_sample = {
+    kInitCordzNextSample, 1};
 
 // kIntervalIfDisabled is the number of profile-eligible events need to occur
 // before the code will confirm that cordz is still disabled.
 constexpr int64_t kIntervalIfDisabled = 1 << 16;
 
-ABSL_ATTRIBUTE_NOINLINE bool cordz_should_profile_slow() {
+ABSL_ATTRIBUTE_NOINLINE int64_t
+cordz_should_profile_slow(SamplingState& state) {
 
   thread_local absl::profiling_internal::ExponentialBiased
       exponential_biased_generator;
@@ -55,30 +57,34 @@
   // Check if we disabled profiling. If so, set the next sample to a "large"
   // number to minimize the overhead of the should_profile codepath.
   if (mean_interval <= 0) {
-    cordz_next_sample = kIntervalIfDisabled;
-    return false;
+    state = {kIntervalIfDisabled, kIntervalIfDisabled};
+    return 0;
   }
 
   // Check if we're always sampling.
   if (mean_interval == 1) {
-    cordz_next_sample = 1;
-    return true;
+    state = {1, 1};
+    return 1;
   }
 
-  if (cordz_next_sample <= 0) {
+  if (cordz_next_sample.next_sample <= 0) {
     // If first check on current thread, check cordz_should_profile()
     // again using the created (initial) stride in cordz_next_sample.
-    const bool initialized = cordz_next_sample != kInitCordzNextSample;
-    cordz_next_sample = exponential_biased_generator.GetStride(mean_interval);
-    return initialized || cordz_should_profile();
+    const bool initialized =
+        cordz_next_sample.next_sample != kInitCordzNextSample;
+    auto old_stride = state.sample_stride;
+    auto stride = exponential_biased_generator.GetStride(mean_interval);
+    state = {stride, stride};
+    bool should_sample = initialized || cordz_should_profile() > 0;
+    return should_sample ? old_stride : 0;
   }
 
-  --cordz_next_sample;
-  return false;
+  --state.next_sample;
+  return 0;
 }
 
 void cordz_set_next_sample_for_testing(int64_t next_sample) {
-  cordz_next_sample = next_sample;
+  cordz_next_sample = {next_sample, next_sample};
 }
 
 #endif  // ABSL_INTERNAL_CORDZ_ENABLED
diff --git a/absl/strings/internal/cordz_functions.h b/absl/strings/internal/cordz_functions.h
index ed108bf..84c185e 100644
--- a/absl/strings/internal/cordz_functions.h
+++ b/absl/strings/internal/cordz_functions.h
@@ -41,23 +41,33 @@
 
 #ifdef ABSL_INTERNAL_CORDZ_ENABLED
 
+struct SamplingState {
+  int64_t next_sample;
+  int64_t sample_stride;
+};
+
 // cordz_next_sample is the number of events until the next sample event. If
 // the value is 1 or less, the code will check on the next event if cordz is
 // enabled, and if so, will sample the Cord. cordz is only enabled when we can
 // use thread locals.
-ABSL_CONST_INIT extern thread_local int64_t cordz_next_sample;
+ABSL_CONST_INIT extern thread_local SamplingState cordz_next_sample;
 
-// Determines if the next sample should be profiled. If it is, the value pointed
-// at by next_sample will be set with the interval until the next sample.
-bool cordz_should_profile_slow();
+// Determines if the next sample should be profiled.
+// Returns:
+//   0: Do not sample
+//  >0: Sample with the stride of the last sampling period
+int64_t cordz_should_profile_slow(SamplingState& state);
 
-// Returns true if the next cord should be sampled.
-inline bool cordz_should_profile() {
-  if (ABSL_PREDICT_TRUE(cordz_next_sample > 1)) {
-    cordz_next_sample--;
-    return false;
+// Determines if the next sample should be profiled.
+// Returns:
+//   0: Do not sample
+//  >0: Sample with the stride of the last sampling period
+inline int64_t cordz_should_profile() {
+  if (ABSL_PREDICT_TRUE(cordz_next_sample.next_sample > 1)) {
+    cordz_next_sample.next_sample--;
+    return 0;
   }
-  return cordz_should_profile_slow();
+  return cordz_should_profile_slow(cordz_next_sample);
 }
 
 // Sets the interval until the next sample (for testing only)
@@ -65,7 +75,7 @@
 
 #else  // ABSL_INTERNAL_CORDZ_ENABLED
 
-inline bool cordz_should_profile() { return false; }
+inline int64_t cordz_should_profile() { return 0; }
 inline void cordz_set_next_sample_for_testing(int64_t) {}
 
 #endif  // ABSL_INTERNAL_CORDZ_ENABLED
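The stride returned by cordz_should_profile() is threaded through to the sample (see the CordzInfo changes below) so that aggregate statistics can be re-weighted: a cord sampled with stride S stands in for roughly S cords. A simplified sketch of that weighting, assuming a hypothetical list of (stride, size) samples rather than the actual cordz reporting code:

#include <cstdint>
#include <utility>
#include <vector>

// Each pair is {sampling_stride, cord_size_in_bytes} for one sampled cord.
int64_t EstimateTotalCordBytes(
    const std::vector<std::pair<int64_t, int64_t>>& samples) {
  int64_t estimate = 0;
  for (const auto& sample : samples) {
    // Scale each sampled cord by the stride it was sampled with.
    estimate += sample.first * sample.second;
  }
  return estimate;
}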
diff --git a/absl/strings/internal/cordz_functions_test.cc b/absl/strings/internal/cordz_functions_test.cc
index b70a685..8fb93d5 100644
--- a/absl/strings/internal/cordz_functions_test.cc
+++ b/absl/strings/internal/cordz_functions_test.cc
@@ -47,9 +47,9 @@
 
   set_cordz_mean_interval(0);
   cordz_set_next_sample_for_testing(0);
-  EXPECT_FALSE(cordz_should_profile());
+  EXPECT_EQ(cordz_should_profile(), 0);
   // 1 << 16 is from kIntervalIfDisabled in cordz_functions.cc.
-  EXPECT_THAT(cordz_next_sample, Eq(1 << 16));
+  EXPECT_THAT(cordz_next_sample.next_sample, Eq(1 << 16));
 
   set_cordz_mean_interval(orig_sample_rate);
 }
@@ -59,8 +59,8 @@
 
   set_cordz_mean_interval(1);
   cordz_set_next_sample_for_testing(1);
-  EXPECT_TRUE(cordz_should_profile());
-  EXPECT_THAT(cordz_next_sample, Le(1));
+  EXPECT_GT(cordz_should_profile(), 0);
+  EXPECT_THAT(cordz_next_sample.next_sample, Le(1));
 
   set_cordz_mean_interval(orig_sample_rate);
 }
@@ -74,9 +74,7 @@
   do {
     ++tries;
     ASSERT_THAT(tries, Le(1000));
-    std::thread thread([&sampled] {
-      sampled = cordz_should_profile();
-    });
+    std::thread thread([&sampled] { sampled = cordz_should_profile() > 0; });
     thread.join();
   } while (sampled);
 }
@@ -94,7 +92,7 @@
     // new value for next_sample each iteration.
     cordz_set_next_sample_for_testing(0);
     cordz_should_profile();
-    sum_of_intervals += cordz_next_sample;
+    sum_of_intervals += cordz_next_sample.next_sample;
   }
 
   // The sum of independent exponential variables is an Erlang distribution,
diff --git a/absl/strings/internal/cordz_handle.cc b/absl/strings/internal/cordz_handle.cc
index a7061db..53d5f52 100644
--- a/absl/strings/internal/cordz_handle.cc
+++ b/absl/strings/internal/cordz_handle.cc
@@ -16,6 +16,7 @@
 #include <atomic>
 
 #include "absl/base/internal/raw_logging.h"  // For ABSL_RAW_CHECK
+#include "absl/base/no_destructor.h"
 #include "absl/synchronization/mutex.h"
 
 namespace absl {
@@ -43,33 +44,32 @@
   }
 };
 
-static Queue* GlobalQueue() {
-  static Queue* global_queue = new Queue;
-  return global_queue;
+static Queue& GlobalQueue() {
+  static absl::NoDestructor<Queue> global_queue;
+  return *global_queue;
 }
 
 }  // namespace
 
 CordzHandle::CordzHandle(bool is_snapshot) : is_snapshot_(is_snapshot) {
-  Queue* global_queue = GlobalQueue();
+  Queue& global_queue = GlobalQueue();
   if (is_snapshot) {
-    MutexLock lock(&global_queue->mutex);
-    CordzHandle* dq_tail =
-        global_queue->dq_tail.load(std::memory_order_acquire);
+    MutexLock lock(&global_queue.mutex);
+    CordzHandle* dq_tail = global_queue.dq_tail.load(std::memory_order_acquire);
     if (dq_tail != nullptr) {
       dq_prev_ = dq_tail;
       dq_tail->dq_next_ = this;
     }
-    global_queue->dq_tail.store(this, std::memory_order_release);
+    global_queue.dq_tail.store(this, std::memory_order_release);
   }
 }
 
 CordzHandle::~CordzHandle() {
-  Queue* global_queue = GlobalQueue();
+  Queue& global_queue = GlobalQueue();
   if (is_snapshot_) {
     std::vector<CordzHandle*> to_delete;
     {
-      MutexLock lock(&global_queue->mutex);
+      MutexLock lock(&global_queue.mutex);
       CordzHandle* next = dq_next_;
       if (dq_prev_ == nullptr) {
         // We were head of the queue, delete every CordzHandle until we reach
@@ -85,7 +85,7 @@
       if (next) {
         next->dq_prev_ = dq_prev_;
       } else {
-        global_queue->dq_tail.store(dq_prev_, std::memory_order_release);
+        global_queue.dq_tail.store(dq_prev_, std::memory_order_release);
       }
     }
     for (CordzHandle* handle : to_delete) {
@@ -95,20 +95,20 @@
 }
 
 bool CordzHandle::SafeToDelete() const {
-  return is_snapshot_ || GlobalQueue()->IsEmpty();
+  return is_snapshot_ || GlobalQueue().IsEmpty();
 }
 
 void CordzHandle::Delete(CordzHandle* handle) {
   assert(handle);
   if (handle) {
-    Queue* const queue = GlobalQueue();
+    Queue& queue = GlobalQueue();
     if (!handle->SafeToDelete()) {
-      MutexLock lock(&queue->mutex);
-      CordzHandle* dq_tail = queue->dq_tail.load(std::memory_order_acquire);
+      MutexLock lock(&queue.mutex);
+      CordzHandle* dq_tail = queue.dq_tail.load(std::memory_order_acquire);
       if (dq_tail != nullptr) {
         handle->dq_prev_ = dq_tail;
         dq_tail->dq_next_ = handle;
-        queue->dq_tail.store(handle, std::memory_order_release);
+        queue.dq_tail.store(handle, std::memory_order_release);
         return;
       }
     }
@@ -118,9 +118,9 @@
 
 std::vector<const CordzHandle*> CordzHandle::DiagnosticsGetDeleteQueue() {
   std::vector<const CordzHandle*> handles;
-  Queue* global_queue = GlobalQueue();
-  MutexLock lock(&global_queue->mutex);
-  CordzHandle* dq_tail = global_queue->dq_tail.load(std::memory_order_acquire);
+  Queue& global_queue = GlobalQueue();
+  MutexLock lock(&global_queue.mutex);
+  CordzHandle* dq_tail = global_queue.dq_tail.load(std::memory_order_acquire);
   for (const CordzHandle* p = dq_tail; p; p = p->dq_prev_) {
     handles.push_back(p);
   }
@@ -133,9 +133,9 @@
   if (handle == nullptr) return true;
   if (handle->is_snapshot_) return false;
   bool snapshot_found = false;
-  Queue* global_queue = GlobalQueue();
-  MutexLock lock(&global_queue->mutex);
-  for (const CordzHandle* p = global_queue->dq_tail; p; p = p->dq_prev_) {
+  Queue& global_queue = GlobalQueue();
+  MutexLock lock(&global_queue.mutex);
+  for (const CordzHandle* p = global_queue.dq_tail; p; p = p->dq_prev_) {
     if (p == handle) return !snapshot_found;
     if (p == this) snapshot_found = true;
   }
@@ -150,8 +150,8 @@
     return handles;
   }
 
-  Queue* global_queue = GlobalQueue();
-  MutexLock lock(&global_queue->mutex);
+  Queue& global_queue = GlobalQueue();
+  MutexLock lock(&global_queue.mutex);
   for (const CordzHandle* p = dq_next_; p != nullptr; p = p->dq_next_) {
     if (!p->is_snapshot()) {
       handles.push_back(p);
diff --git a/absl/strings/internal/cordz_info.cc b/absl/strings/internal/cordz_info.cc
index b24c3da..b7c7fed 100644
--- a/absl/strings/internal/cordz_info.cc
+++ b/absl/strings/internal/cordz_info.cc
@@ -14,6 +14,8 @@
 
 #include "absl/strings/internal/cordz_info.h"
 
+#include <cstdint>
+
 #include "absl/base/config.h"
 #include "absl/base/internal/spinlock.h"
 #include "absl/container/inlined_vector.h"
@@ -247,10 +249,12 @@
   return next;
 }
 
-void CordzInfo::TrackCord(InlineData& cord, MethodIdentifier method) {
+void CordzInfo::TrackCord(InlineData& cord, MethodIdentifier method,
+                          int64_t sampling_stride) {
   assert(cord.is_tree());
   assert(!cord.is_profiled());
-  CordzInfo* cordz_info = new CordzInfo(cord.as_tree(), nullptr, method);
+  CordzInfo* cordz_info =
+      new CordzInfo(cord.as_tree(), nullptr, method, sampling_stride);
   cord.set_cordz_info(cordz_info);
   cordz_info->Track();
 }
@@ -266,7 +270,8 @@
   if (cordz_info != nullptr) cordz_info->Untrack();
 
   // Start new cord sample
-  cordz_info = new CordzInfo(cord.as_tree(), src.cordz_info(), method);
+  cordz_info = new CordzInfo(cord.as_tree(), src.cordz_info(), method,
+                             src.cordz_info()->sampling_stride());
   cord.set_cordz_info(cordz_info);
   cordz_info->Track();
 }
@@ -298,9 +303,8 @@
   return src->stack_depth_;
 }
 
-CordzInfo::CordzInfo(CordRep* rep,
-                     const CordzInfo* src,
-                     MethodIdentifier method)
+CordzInfo::CordzInfo(CordRep* rep, const CordzInfo* src,
+                     MethodIdentifier method, int64_t sampling_stride)
     : rep_(rep),
       stack_depth_(
           static_cast<size_t>(absl::GetStackTrace(stack_,
@@ -309,7 +313,8 @@
       parent_stack_depth_(FillParentStack(src, parent_stack_)),
       method_(method),
       parent_method_(GetParentMethod(src)),
-      create_time_(absl::Now()) {
+      create_time_(absl::Now()),
+      sampling_stride_(sampling_stride) {
   update_tracker_.LossyAdd(method);
   if (src) {
     // Copy parent counters.
diff --git a/absl/strings/internal/cordz_info.h b/absl/strings/internal/cordz_info.h
index 17eaa91..2dc9d16 100644
--- a/absl/strings/internal/cordz_info.h
+++ b/absl/strings/internal/cordz_info.h
@@ -60,7 +60,8 @@
   // and/or deleted. `method` identifies the Cord public API method initiating
   // the cord to be sampled.
   // Requires `cord` to hold a tree, and `cord.cordz_info()` to be null.
-  static void TrackCord(InlineData& cord, MethodIdentifier method);
+  static void TrackCord(InlineData& cord, MethodIdentifier method,
+                        int64_t sampling_stride);
 
   // Identical to TrackCord(), except that this function fills the
   // `parent_stack` and `parent_method` properties of the returned CordzInfo
@@ -181,6 +182,8 @@
   // or RemovePrefix.
   CordzStatistics GetCordzStatistics() const;
 
+  int64_t sampling_stride() const { return sampling_stride_; }
+
  private:
   using SpinLock = absl::base_internal::SpinLock;
   using SpinLockHolder = ::absl::base_internal::SpinLockHolder;
@@ -199,7 +202,7 @@
   static constexpr size_t kMaxStackDepth = 64;
 
   explicit CordzInfo(CordRep* rep, const CordzInfo* src,
-                     MethodIdentifier method);
+                     MethodIdentifier method, int64_t weight);
   ~CordzInfo() override;
 
   // Sets `rep_` without holding a lock.
@@ -250,12 +253,14 @@
   const MethodIdentifier parent_method_;
   CordzUpdateTracker update_tracker_;
   const absl::Time create_time_;
+  const int64_t sampling_stride_;
 };
 
 inline ABSL_ATTRIBUTE_ALWAYS_INLINE void CordzInfo::MaybeTrackCord(
     InlineData& cord, MethodIdentifier method) {
-  if (ABSL_PREDICT_FALSE(cordz_should_profile())) {
-    TrackCord(cord, method);
+  auto stride = cordz_should_profile();
+  if (ABSL_PREDICT_FALSE(stride > 0)) {
+    TrackCord(cord, method, stride);
   }
 }
 
diff --git a/absl/strings/internal/cordz_info_statistics_test.cc b/absl/strings/internal/cordz_info_statistics_test.cc
index d55773f..3e6a8a0 100644
--- a/absl/strings/internal/cordz_info_statistics_test.cc
+++ b/absl/strings/internal/cordz_info_statistics_test.cc
@@ -152,7 +152,7 @@
 // Samples the cord and returns CordzInfo::GetStatistics()
 CordzStatistics SampleCord(CordRep* rep) {
   InlineData cord(rep);
-  CordzInfo::TrackCord(cord, CordzUpdateTracker::kUnknown);
+  CordzInfo::TrackCord(cord, CordzUpdateTracker::kUnknown, 1);
   CordzStatistics stats = cord.cordz_info()->GetCordzStatistics();
   cord.cordz_info()->Untrack();
   return stats;
@@ -480,7 +480,7 @@
 
                 // 50/50 sample
                 if (coin_toss(gen) != 0) {
-                  CordzInfo::TrackCord(cord, CordzUpdateTracker::kUnknown);
+                  CordzInfo::TrackCord(cord, CordzUpdateTracker::kUnknown, 1);
                 }
               }
             }
diff --git a/absl/strings/internal/cordz_info_test.cc b/absl/strings/internal/cordz_info_test.cc
index cd226c3..81ecce2 100644
--- a/absl/strings/internal/cordz_info_test.cc
+++ b/absl/strings/internal/cordz_info_test.cc
@@ -65,7 +65,7 @@
 
 TEST(CordzInfoTest, TrackCord) {
   TestCordData data;
-  CordzInfo::TrackCord(data.data, kTrackCordMethod);
+  CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);
   CordzInfo* info = data.data.cordz_info();
   ASSERT_THAT(info, Ne(nullptr));
   EXPECT_FALSE(info->is_snapshot());
@@ -91,7 +91,7 @@
 TEST(CordzInfoTest, MaybeTrackChildCordWithoutSamplingParentSampled) {
   CordzSamplingIntervalHelper sample_none(99999);
   TestCordData parent, child;
-  CordzInfo::TrackCord(parent.data, kTrackCordMethod);
+  CordzInfo::TrackCord(parent.data, kTrackCordMethod, 1);
   CordzInfo::MaybeTrackCord(child.data, parent.data, kTrackCordMethod);
   CordzInfo* parent_info = parent.data.cordz_info();
   CordzInfo* child_info = child.data.cordz_info();
@@ -105,7 +105,7 @@
 TEST(CordzInfoTest, MaybeTrackChildCordWithoutSamplingChildSampled) {
   CordzSamplingIntervalHelper sample_none(99999);
   TestCordData parent, child;
-  CordzInfo::TrackCord(child.data, kTrackCordMethod);
+  CordzInfo::TrackCord(child.data, kTrackCordMethod, 1);
   CordzInfo::MaybeTrackCord(child.data, parent.data, kTrackCordMethod);
   EXPECT_THAT(child.data.cordz_info(), Eq(nullptr));
 }
@@ -113,14 +113,14 @@
 TEST(CordzInfoTest, MaybeTrackChildCordWithSamplingChildSampled) {
   CordzSamplingIntervalHelper sample_all(1);
   TestCordData parent, child;
-  CordzInfo::TrackCord(child.data, kTrackCordMethod);
+  CordzInfo::TrackCord(child.data, kTrackCordMethod, 1);
   CordzInfo::MaybeTrackCord(child.data, parent.data, kTrackCordMethod);
   EXPECT_THAT(child.data.cordz_info(), Eq(nullptr));
 }
 
 TEST(CordzInfoTest, UntrackCord) {
   TestCordData data;
-  CordzInfo::TrackCord(data.data, kTrackCordMethod);
+  CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);
   CordzInfo* info = data.data.cordz_info();
 
   info->Untrack();
@@ -129,7 +129,7 @@
 
 TEST(CordzInfoTest, UntrackCordWithSnapshot) {
   TestCordData data;
-  CordzInfo::TrackCord(data.data, kTrackCordMethod);
+  CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);
   CordzInfo* info = data.data.cordz_info();
 
   CordzSnapshot snapshot;
@@ -141,7 +141,7 @@
 
 TEST(CordzInfoTest, SetCordRep) {
   TestCordData data;
-  CordzInfo::TrackCord(data.data, kTrackCordMethod);
+  CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);
   CordzInfo* info = data.data.cordz_info();
 
   TestCordRep rep;
@@ -155,7 +155,7 @@
 
 TEST(CordzInfoTest, SetCordRepNullUntracksCordOnUnlock) {
   TestCordData data;
-  CordzInfo::TrackCord(data.data, kTrackCordMethod);
+  CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);
   CordzInfo* info = data.data.cordz_info();
 
   info->Lock(CordzUpdateTracker::kAppendString);
@@ -169,7 +169,7 @@
 
 TEST(CordzInfoTest, RefCordRep) {
   TestCordData data;
-  CordzInfo::TrackCord(data.data, kTrackCordMethod);
+  CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);
   CordzInfo* info = data.data.cordz_info();
 
   size_t refcount = data.rep.rep->refcount.Get();
@@ -183,7 +183,7 @@
 
 TEST(CordzInfoTest, SetCordRepRequiresMutex) {
   TestCordData data;
-  CordzInfo::TrackCord(data.data, kTrackCordMethod);
+  CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);
   CordzInfo* info = data.data.cordz_info();
   TestCordRep rep;
   EXPECT_DEBUG_DEATH(info->SetCordRep(rep.rep), ".*");
@@ -197,13 +197,13 @@
   EXPECT_THAT(CordzInfo::Head(snapshot), Eq(nullptr));
 
   TestCordData data;
-  CordzInfo::TrackCord(data.data, kTrackCordMethod);
+  CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);
   CordzInfo* info1 = data.data.cordz_info();
   ASSERT_THAT(CordzInfo::Head(snapshot), Eq(info1));
   EXPECT_THAT(info1->Next(snapshot), Eq(nullptr));
 
   TestCordData data2;
-  CordzInfo::TrackCord(data2.data, kTrackCordMethod);
+  CordzInfo::TrackCord(data2.data, kTrackCordMethod, 1);
   CordzInfo* info2 = data2.data.cordz_info();
   ASSERT_THAT(CordzInfo::Head(snapshot), Eq(info2));
   EXPECT_THAT(info2->Next(snapshot), Eq(info1));
@@ -222,13 +222,13 @@
   EXPECT_THAT(CordzInfo::Head(snapshot), Eq(nullptr));
 
   TestCordData data;
-  CordzInfo::TrackCord(data.data, kTrackCordMethod);
+  CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);
   CordzInfo* info1 = data.data.cordz_info();
   ASSERT_THAT(CordzInfo::Head(snapshot), Eq(info1));
   EXPECT_THAT(info1->Next(snapshot), Eq(nullptr));
 
   TestCordData data2;
-  CordzInfo::TrackCord(data2.data, kTrackCordMethod);
+  CordzInfo::TrackCord(data2.data, kTrackCordMethod, 1);
   CordzInfo* info2 = data2.data.cordz_info();
   ASSERT_THAT(CordzInfo::Head(snapshot), Eq(info2));
   EXPECT_THAT(info2->Next(snapshot), Eq(info1));
@@ -254,7 +254,7 @@
   // makes small modifications to its testing stack. 50 is sufficient to prove
   // that we got a decent stack.
   static constexpr int kMaxStackDepth = 50;
-  CordzInfo::TrackCord(data.data, kTrackCordMethod);
+  CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);
   CordzInfo* info = data.data.cordz_info();
   std::vector<void*> local_stack;
   local_stack.resize(kMaxStackDepth);
@@ -284,7 +284,7 @@
   return data.cordz_info();
 }
 CordzInfo* TrackParentCord(InlineData& data) {
-  CordzInfo::TrackCord(data, kTrackCordMethod);
+  CordzInfo::TrackCord(data, kTrackCordMethod, 1);
   return data.cordz_info();
 }
 
diff --git a/absl/strings/internal/cordz_sample_token_test.cc b/absl/strings/internal/cordz_sample_token_test.cc
index 6be1770..7152603 100644
--- a/absl/strings/internal/cordz_sample_token_test.cc
+++ b/absl/strings/internal/cordz_sample_token_test.cc
@@ -81,11 +81,11 @@
 
 TEST(CordzSampleTokenTest, Iterator) {
   TestCordData cord1, cord2, cord3;
-  CordzInfo::TrackCord(cord1.data, kTrackCordMethod);
+  CordzInfo::TrackCord(cord1.data, kTrackCordMethod, 1);
   CordzInfo* info1 = cord1.data.cordz_info();
-  CordzInfo::TrackCord(cord2.data, kTrackCordMethod);
+  CordzInfo::TrackCord(cord2.data, kTrackCordMethod, 1);
   CordzInfo* info2 = cord2.data.cordz_info();
-  CordzInfo::TrackCord(cord3.data, kTrackCordMethod);
+  CordzInfo::TrackCord(cord3.data, kTrackCordMethod, 1);
   CordzInfo* info3 = cord3.data.cordz_info();
 
   CordzSampleToken token;
@@ -105,21 +105,21 @@
   TestCordData cord1;
   TestCordData cord2;
   TestCordData cord3;
-  CordzInfo::TrackCord(cord1.data, kTrackCordMethod);
+  CordzInfo::TrackCord(cord1.data, kTrackCordMethod, 1);
   CordzInfo* info1 = cord1.data.cordz_info();
 
   CordzSampleToken token1;
   // lhs starts with the CordzInfo corresponding to cord1 at the head.
   CordzSampleToken::Iterator lhs = token1.begin();
 
-  CordzInfo::TrackCord(cord2.data, kTrackCordMethod);
+  CordzInfo::TrackCord(cord2.data, kTrackCordMethod, 1);
   CordzInfo* info2 = cord2.data.cordz_info();
 
   CordzSampleToken token2;
   // rhs starts with the CordzInfo corresponding to cord2 at the head.
   CordzSampleToken::Iterator rhs = token2.begin();
 
-  CordzInfo::TrackCord(cord3.data, kTrackCordMethod);
+  CordzInfo::TrackCord(cord3.data, kTrackCordMethod, 1);
   CordzInfo* info3 = cord3.data.cordz_info();
 
   // lhs is on cord1 while rhs is on cord2.
@@ -170,7 +170,7 @@
             cord.data.clear_cordz_info();
           } else {
             // 2) Track
-            CordzInfo::TrackCord(cord.data, kTrackCordMethod);
+            CordzInfo::TrackCord(cord.data, kTrackCordMethod, 1);
           }
         } else {
           std::unique_ptr<CordzSampleToken>& token = tokens[index];
diff --git a/absl/strings/internal/cordz_update_scope_test.cc b/absl/strings/internal/cordz_update_scope_test.cc
index 3d08c62..1b4701f 100644
--- a/absl/strings/internal/cordz_update_scope_test.cc
+++ b/absl/strings/internal/cordz_update_scope_test.cc
@@ -37,7 +37,7 @@
 
 TEST(CordzUpdateScopeTest, ScopeSampledCord) {
   TestCordData cord;
-  CordzInfo::TrackCord(cord.data, kTrackCordMethod);
+  CordzInfo::TrackCord(cord.data, kTrackCordMethod, 1);
   CordzUpdateScope scope(cord.data.cordz_info(), kTrackCordMethod);
   cord.data.cordz_info()->SetCordRep(nullptr);
 }
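
All of the Cordz test updates above add a third argument to CordzInfo::TrackCord. Judging only from the constant 1 passed at every call site, the new parameter looks like a sampling stride or weight (roughly "this entry stands for one sampled cord"); the parameter name used below is an assumption for illustration, not taken from this patch. A minimal sketch of the updated call pattern, inside the same test fixtures used above:

    // Hypothetical parameter name; the patch itself only passes the literal 1.
    TestCordData data;
    CordzInfo::TrackCord(data.data, kTrackCordMethod, /*sampling_stride=*/1);
    CordzInfo* info = data.data.cordz_info();  // non-null once tracked
    info->Untrack();
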
diff --git a/absl/strings/internal/escaping.cc b/absl/strings/internal/escaping.cc
index 56a4cbe..d2abe66 100644
--- a/absl/strings/internal/escaping.cc
+++ b/absl/strings/internal/escaping.cc
@@ -14,6 +14,8 @@
 
 #include "absl/strings/internal/escaping.h"
 
+#include <limits>
+
 #include "absl/base/internal/endian.h"
 #include "absl/base/internal/raw_logging.h"
 
@@ -31,12 +33,14 @@
 ABSL_CONST_INIT const char kWebSafeBase64Chars[] =
     "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";
 
-
 size_t CalculateBase64EscapedLenInternal(size_t input_len, bool do_padding) {
   // Base64 encodes three bytes of input at a time. If the input is not
   // divisible by three, we pad as appropriate.
   //
   // Base64 encodes each three bytes of input into four bytes of output.
+  constexpr size_t kMaxSize = (std::numeric_limits<size_t>::max() - 1) / 4 * 3;
+  ABSL_INTERNAL_CHECK(input_len <= kMaxSize,
+                      "CalculateBase64EscapedLenInternal() overflow");
   size_t len = (input_len / 3) * 4;
 
   // Since all base 64 input is an integral number of octets, only the following
@@ -66,7 +70,6 @@
     }
   }
 
-  assert(len >= input_len);  // make sure we didn't overflow
   return len;
 }
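
The new ABSL_INTERNAL_CHECK in CalculateBase64EscapedLenInternal rejects inputs whose encoded length would wrap around size_t; the removed assert(len >= input_len) only detected the overflow after it had happened, and only in debug builds. A minimal standalone sketch of the same bound and length formula for the padded case (the function name and the use of plain assert are illustrative, not Abseil's):

    #include <cassert>
    #include <cstddef>
    #include <limits>

    // Padded base64: every 3 input bytes become 4 output bytes, and a 1- or
    // 2-byte tail still produces a full 4-byte group.
    size_t PaddedBase64Len(size_t input_len) {
      constexpr size_t kMaxSize =
          (std::numeric_limits<size_t>::max() - 1) / 4 * 3;
      assert(input_len <= kMaxSize && "encoded length would overflow size_t");
      size_t len = (input_len / 3) * 4;
      if (input_len % 3 != 0) len += 4;
      return len;
    }

    int main() {
      assert(PaddedBase64Len(0) == 0);
      assert(PaddedBase64Len(3) == 4);
      assert(PaddedBase64Len(4) == 8);
      return 0;
    }
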
 
diff --git a/absl/strings/internal/has_absl_stringify.h b/absl/strings/internal/has_absl_stringify.h
deleted file mode 100644
index f82cfe2..0000000
--- a/absl/strings/internal/has_absl_stringify.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2024 The Abseil Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_STRINGS_INTERNAL_HAS_ABSL_STRINGIFY_H_
-#define ABSL_STRINGS_INTERNAL_HAS_ABSL_STRINGIFY_H_
-
-#include "absl/strings/has_absl_stringify.h"
-
-#include "absl/base/config.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-
-namespace strings_internal {
-
-// This exists to fix a circular dependency problem with the GoogleTest release.
-// GoogleTest referenced this internal file and this internal trait.  Since
-// simultaneous releases are not possible since once release must reference
-// another, we will temporarily add this back.
-// https://github.com/google/googletest/blob/v1.14.x/googletest/include/gtest/gtest-printers.h#L119
-//
-// This file can be deleted after the next Abseil and GoogleTest release.
-//
-// https://github.com/google/googletest/pull/4368#issuecomment-1717699895
-// https://github.com/google/googletest/pull/4368#issuecomment-1717699895
-using ::absl::HasAbslStringify;
-
-}  // namespace strings_internal
-
-ABSL_NAMESPACE_END
-}  // namespace absl
-
-#endif  // ABSL_STRINGS_INTERNAL_HAS_ABSL_STRINGIFY_H_
diff --git a/absl/strings/internal/str_format/convert_test.cc b/absl/strings/internal/str_format/convert_test.cc
index 7f22277..baffe05 100644
--- a/absl/strings/internal/str_format/convert_test.cc
+++ b/absl/strings/internal/str_format/convert_test.cc
@@ -785,8 +785,7 @@
 }
 
 template <typename Floating>
-void TestWithMultipleFormatsHelper(const std::vector<Floating> &floats,
-                                   const std::set<Floating> &skip_verify) {
+void TestWithMultipleFormatsHelper(Floating tested_float) {
   const NativePrintfTraits &native_traits = VerifyNativeImplementation();
   // Reserve the space to ensure we don't allocate memory in the output itself.
   std::string str_format_result;
@@ -817,41 +816,41 @@
         continue;
       }
 
-      for (Floating d : floats) {
-        if (!native_traits.hex_float_prefers_denormal_repr &&
-            (f == 'a' || f == 'A') && std::fpclassify(d) == FP_SUBNORMAL) {
-          continue;
-        }
+      if (!native_traits.hex_float_prefers_denormal_repr &&
+          (f == 'a' || f == 'A') &&
+          std::fpclassify(tested_float) == FP_SUBNORMAL) {
+        continue;
+      }
         int i = -10;
-        FormatArgImpl args[2] = {FormatArgImpl(d), FormatArgImpl(i)};
+        FormatArgImpl args[2] = {FormatArgImpl(tested_float), FormatArgImpl(i)};
         UntypedFormatSpecImpl format(fmt_str);
 
         string_printf_result.clear();
-        StrAppend(&string_printf_result, fmt_str.c_str(), d, i);
+        StrAppend(&string_printf_result, fmt_str.c_str(), tested_float, i);
         str_format_result.clear();
 
         {
           AppendPack(&str_format_result, format, absl::MakeSpan(args));
         }
 
+        // For values that we know won't match the standard library
+        // implementation we skip verification, but still run the algorithm to
+        // catch asserts/sanitizer bugs.
 #ifdef _MSC_VER
         // MSVC has a different rounding policy than us so we can't test our
         // implementation against the native one there.
         continue;
 #elif defined(__APPLE__)
         // Apple formats NaN differently (+nan) vs. (nan)
-        if (std::isnan(d)) continue;
+        if (std::isnan(tested_float)) continue;
 #endif
-        if (string_printf_result != str_format_result &&
-            skip_verify.find(d) == skip_verify.end()) {
-          // We use ASSERT_EQ here because failures are usually correlated and a
-          // bug would print way too many failed expectations causing the test
-          // to time out.
-          ASSERT_EQ(string_printf_result, str_format_result)
-              << fmt_str << " " << StrPrint("%.18g", d) << " "
-              << StrPrint("%a", d) << " " << StrPrint("%.50f", d);
-        }
-      }
+        // We use ASSERT_EQ here because failures are usually correlated and a
+        // bug would print way too many failed expectations causing the test
+        // to time out.
+        ASSERT_EQ(string_printf_result, str_format_result)
+            << fmt_str << " " << StrPrint("%.18g", tested_float) << " "
+            << StrPrint("%a", tested_float) << " "
+            << StrPrint("%.50f", tested_float);
     }
   }
 }
@@ -904,14 +903,12 @@
   });
   floats.erase(std::unique(floats.begin(), floats.end()), floats.end());
 
-  TestWithMultipleFormatsHelper(floats, {});
+  for (float f : floats) {
+    TestWithMultipleFormatsHelper(f);
+  }
 }
 
 TEST_F(FormatConvertTest, Double) {
-  // For values that we know won't match the standard library implementation we
-  // skip verification, but still run the algorithm to catch asserts/sanitizer
-  // bugs.
-  std::set<double> skip_verify;
   std::vector<double> doubles = {0.0,
                                  -0.0,
                                  .99999999999999,
@@ -946,32 +943,9 @@
     }
   }
 
-  // Workaround libc bug.
-  // https://sourceware.org/bugzilla/show_bug.cgi?id=22142
-  const bool gcc_bug_22142 =
-      StrPrint("%f", std::numeric_limits<double>::max()) !=
-      "1797693134862315708145274237317043567980705675258449965989174768031"
-      "5726078002853876058955863276687817154045895351438246423432132688946"
-      "4182768467546703537516986049910576551282076245490090389328944075868"
-      "5084551339423045832369032229481658085593321233482747978262041447231"
-      "68738177180919299881250404026184124858368.000000";
-
   for (int exp = -300; exp <= 300; ++exp) {
     const double all_ones_mantissa = 0x1fffffffffffff;
     doubles.push_back(std::ldexp(all_ones_mantissa, exp));
-    if (gcc_bug_22142) {
-      skip_verify.insert(doubles.back());
-    }
-  }
-
-  if (gcc_bug_22142) {
-    using L = std::numeric_limits<double>;
-    skip_verify.insert(L::max());
-    skip_verify.insert(L::min());  // NOLINT
-    skip_verify.insert(L::denorm_min());
-    skip_verify.insert(-L::max());
-    skip_verify.insert(-L::min());  // NOLINT
-    skip_verify.insert(-L::denorm_min());
   }
 
   // Remove duplicates to speed up the logic below.
@@ -982,7 +956,9 @@
   });
   doubles.erase(std::unique(doubles.begin(), doubles.end()), doubles.end());
 
-  TestWithMultipleFormatsHelper(doubles, skip_verify);
+  for (double d : doubles) {
+    TestWithMultipleFormatsHelper(d);
+  }
 }
 
 TEST_F(FormatConvertTest, DoubleRound) {
diff --git a/absl/strings/internal/str_join_internal.h b/absl/strings/internal/str_join_internal.h
index d97d503..3e730c7 100644
--- a/absl/strings/internal/str_join_internal.h
+++ b/absl/strings/internal/str_join_internal.h
@@ -31,16 +31,23 @@
 #ifndef ABSL_STRINGS_INTERNAL_STR_JOIN_INTERNAL_H_
 #define ABSL_STRINGS_INTERNAL_STR_JOIN_INTERNAL_H_
 
+#include <cstdint>
 #include <cstring>
+#include <initializer_list>
 #include <iterator>
+#include <limits>
 #include <memory>
 #include <string>
+#include <tuple>
 #include <type_traits>
 #include <utility>
 
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
 #include "absl/strings/internal/ostringstream.h"
 #include "absl/strings/internal/resize_uninitialized.h"
 #include "absl/strings/str_cat.h"
+#include "absl/strings/string_view.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -230,14 +237,19 @@
   if (start != end) {
     // Sums size
     auto&& start_value = *start;
-    size_t result_size = start_value.size();
+    // Use uint64_t to prevent size_t overflow. We assume it is not possible for
+    // in memory strings to overflow a uint64_t.
+    uint64_t result_size = start_value.size();
     for (Iterator it = start; ++it != end;) {
       result_size += s.size();
       result_size += (*it).size();
     }
 
     if (result_size > 0) {
-      STLStringResizeUninitialized(&result, result_size);
+      constexpr uint64_t kMaxSize =
+          uint64_t{(std::numeric_limits<size_t>::max)()};
+      ABSL_INTERNAL_CHECK(result_size <= kMaxSize, "size_t overflow");
+      STLStringResizeUninitialized(&result, static_cast<size_t>(result_size));
 
       // Joins strings
       char* result_buf = &*result.begin();
@@ -310,6 +322,15 @@
   return JoinRange(begin(range), end(range), separator);
 }
 
+template <typename Tuple, std::size_t... I>
+std::string JoinTuple(const Tuple& value, absl::string_view separator,
+                      std::index_sequence<I...>) {
+  return JoinRange(
+      std::initializer_list<absl::string_view>{
+          static_cast<const AlphaNum&>(std::get<I>(value)).Piece()...},
+      separator);
+}
+
 }  // namespace strings_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
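
Two things change in str_join_internal.h: JoinRange now sums piece sizes in uint64_t and checks the total still fits in size_t before resizing, and a new JoinTuple helper expands a tuple into an initializer_list of pieces via std::index_sequence so it can reuse JoinRange. A self-contained sketch of that expansion technique, using std::string pieces instead of AlphaNum (illustrative, not Abseil's implementation):

    #include <initializer_list>
    #include <iostream>
    #include <string>
    #include <tuple>
    #include <utility>

    // Joins any range of string-like pieces with `sep`.
    template <typename Range>
    std::string JoinRange(const Range& range, const std::string& sep) {
      std::string out;
      const char* delim = "";
      for (const auto& piece : range) {
        out += delim;
        out += piece;
        delim = sep.c_str();
      }
      return out;
    }

    // Expands the tuple into an initializer_list of pieces, mirroring the
    // index_sequence expansion in JoinTuple above. Assumes a non-empty tuple
    // of elements convertible to std::string.
    template <typename Tuple, std::size_t... I>
    std::string JoinTuple(const Tuple& value, const std::string& sep,
                          std::index_sequence<I...>) {
      return JoinRange(std::initializer_list<std::string>{
                           std::string(std::get<I>(value))...},
                       sep);
    }

    int main() {
      auto t = std::make_tuple(std::string("2024"), std::string("06"),
                               std::string("30"));
      std::cout << JoinTuple(t, "-", std::make_index_sequence<3>{}) << "\n";
      // prints 2024-06-30
    }
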
diff --git a/absl/strings/internal/str_split_internal.h b/absl/strings/internal/str_split_internal.h
index 081ad85..11ea96f 100644
--- a/absl/strings/internal/str_split_internal.h
+++ b/absl/strings/internal/str_split_internal.h
@@ -30,6 +30,7 @@
 #define ABSL_STRINGS_INTERNAL_STR_SPLIT_INTERNAL_H_
 
 #include <array>
+#include <cstddef>
 #include <initializer_list>
 #include <iterator>
 #include <tuple>
@@ -402,7 +403,10 @@
           ar[index].size = it->size();
           ++it;
         } while (++index != ar.size() && !it.at_end());
-        v.insert(v.end(), ar.begin(), ar.begin() + index);
+        // We static_cast index to a signed type to work around overzealous
+        // compiler warnings about signedness.
+        v.insert(v.end(), ar.begin(),
+                 ar.begin() + static_cast<ptrdiff_t>(index));
       }
       return v;
     }
diff --git a/absl/strings/numbers.cc b/absl/strings/numbers.cc
index 882c3a8..b57d9e8 100644
--- a/absl/strings/numbers.cc
+++ b/absl/strings/numbers.cc
@@ -20,9 +20,7 @@
 #include <algorithm>
 #include <cassert>
 #include <cfloat>  // for DBL_DIG and FLT_DIG
-#include <climits>
 #include <cmath>   // for HUGE_VAL
-#include <cstddef>
 #include <cstdint>
 #include <cstdio>
 #include <cstdlib>
@@ -30,7 +28,6 @@
 #include <iterator>
 #include <limits>
 #include <system_error>  // NOLINT(build/c++11)
-#include <type_traits>
 #include <utility>
 
 #include "absl/base/attributes.h"
@@ -159,71 +156,28 @@
 constexpr uint64_t kFourZeroBytes = 0x01010101 * '0';
 constexpr uint64_t kEightZeroBytes = 0x0101010101010101ull * '0';
 
-template <typename T>
-constexpr T Pow(T base, uint32_t n) {
-  // Exponentiation by squaring
-  return static_cast<T>((n > 1 ? Pow(base * base, n >> 1) : static_cast<T>(1)) *
-                        ((n & 1) ? base : static_cast<T>(1)));
-}
-
-// Given n, calculates C where the following holds for all 0 <= x < Pow(100, n):
-// x / Pow(10, n) == x * C / Pow(2, n * 10)
-// In other words, it allows us to divide by a power of 10 via a single
-// multiplication and bit shifts, assuming the input will be smaller than the
-// square of that power of 10.
-template <typename T>
-constexpr T ComputePowerOf100DivisionCoefficient(uint32_t n) {
-  if (n > 4) {
-    // This doesn't work for large powers of 100, due to overflow
-    abort();
-  }
-  T denom = 16 - 1;
-  T num = (denom + 1) - 10;
-  T gcd = 3;  // Greatest common divisor of numerator and denominator
-  denom = Pow(denom / gcd, n);
-  num = Pow(num / gcd, 9 * n);
-  T quotient = num / denom;
-  if (num % denom >= denom / 2) {
-    // Round up, since the remainder is more than half the denominator
-    ++quotient;
-  }
-  return quotient;
-}
-
-// * kDivisionBy10Mul / kDivisionBy10Div is a division by 10 for values from 0
-// to 99. It's also a division of a structure [k takes 2 bytes][m takes 2
-// bytes], then * kDivisionBy10Mul / kDivisionBy10Div will be [k / 10][m / 10].
-// It allows parallel division.
-constexpr uint64_t kDivisionBy10Mul =
-    ComputePowerOf100DivisionCoefficient<uint64_t>(1);
-static_assert(kDivisionBy10Mul == 103,
-              "division coefficient for 10 is incorrect");
+// * 103 / 1024 is a division by 10 for values from 0 to 99. It's also a
+// division of a structure [k takes 2 bytes][m takes 2 bytes], then * 103 / 1024
+// will be [k / 10][m / 10]. It allows parallel division.
+constexpr uint64_t kDivisionBy10Mul = 103u;
 constexpr uint64_t kDivisionBy10Div = 1 << 10;
 
-// * kDivisionBy100Mul / kDivisionBy100Div is a division by 100 for values from
-// 0 to 9999.
-constexpr uint64_t kDivisionBy100Mul =
-    ComputePowerOf100DivisionCoefficient<uint64_t>(2);
-static_assert(kDivisionBy100Mul == 10486,
-              "division coefficient for 100 is incorrect");
+// * 10486 / 1048576 is a division by 100 for values from 0 to 9999.
+constexpr uint64_t kDivisionBy100Mul = 10486u;
 constexpr uint64_t kDivisionBy100Div = 1 << 20;
 
-static_assert(ComputePowerOf100DivisionCoefficient<uint64_t>(3) == 1073742,
-              "division coefficient for 1000 is incorrect");
-
-// Same as `PrepareEightDigits`, but produces 2 digits for integers < 100.
-inline uint32_t PrepareTwoDigitsImpl(uint32_t i, bool reversed) {
-  assert(i < 100);
-  uint32_t div10 = (i * kDivisionBy10Mul) / kDivisionBy10Div;
-  uint32_t mod10 = i - 10u * div10;
-  return (div10 << (reversed ? 8 : 0)) + (mod10 << (reversed ? 0 : 8));
-}
-inline uint32_t PrepareTwoDigits(uint32_t i) {
-  return PrepareTwoDigitsImpl(i, false);
+// Encode functions write the ASCII output of input `n` to `out_str`.
+inline char* EncodeHundred(uint32_t n, absl::Nonnull<char*> out_str) {
+  int num_digits = static_cast<int>(n - 10) >> 8;
+  uint32_t div10 = (n * kDivisionBy10Mul) / kDivisionBy10Div;
+  uint32_t mod10 = n - 10u * div10;
+  uint32_t base = kTwoZeroBytes + div10 + (mod10 << 8);
+  base >>= num_digits & 8;
+  little_endian::Store16(out_str, static_cast<uint16_t>(base));
+  return out_str + 2 + num_digits;
 }
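
EncodeHundred leans on two tricks that also appear in the helpers below: division by a small constant via multiply-and-shift (the kDivisionBy10Mul / kDivisionBy100Mul comments above state the exact ranges), and a branch-free digit count, where static_cast<int>(n - 10) >> 8 is -1 for n < 10 and 0 for 10 <= n < 100, so out_str + 2 + num_digits lands just past the digits actually needed. A small self-checking sketch of both identities (standalone, relying on the same arithmetic-shift behavior the patch itself assumes; not Abseil code):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Multiply-and-shift replaces real division on the documented ranges:
      //   i / 10  == (i * 103)   >> 10   for 0 <= i <= 99
      //   i / 100 == (i * 10486) >> 20   for 0 <= i <= 9999
      for (uint32_t i = 0; i < 100; ++i) assert((i * 103u) >> 10 == i / 10);
      for (uint32_t i = 0; i < 10000; ++i) assert((i * 10486u) >> 20 == i / 100);

      // Branch-free digit count used by EncodeHundred: the arithmetic shift
      // of the (negative) difference yields -1 for one-digit values and 0 for
      // two-digit values.
      for (uint32_t n = 0; n < 100; ++n) {
        int num_digits = static_cast<int>(n - 10) >> 8;
        assert(2 + num_digits == (n < 10 ? 1 : 2));
      }
      std::puts("identities hold");
    }
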
 
-// Same as `PrepareEightDigits`, but produces 4 digits for integers < 10000.
-inline uint32_t PrepareFourDigitsImpl(uint32_t n, bool reversed) {
+inline char* EncodeTenThousand(uint32_t n, absl::Nonnull<char*> out_str) {
   // We split lower 2 digits and upper 2 digits of n into 2 byte consecutive
   // blocks. 123 ->  [\0\1][\0\23]. We divide by 10 both blocks
   // (it's 1 division + zeroing upper bits), and compute modulo 10 as well "in
@@ -231,19 +185,22 @@
   // strip trailing zeros, add ASCII '0000' and return.
   uint32_t div100 = (n * kDivisionBy100Mul) / kDivisionBy100Div;
   uint32_t mod100 = n - 100ull * div100;
-  uint32_t hundreds =
-      (mod100 << (reversed ? 0 : 16)) + (div100 << (reversed ? 16 : 0));
+  uint32_t hundreds = (mod100 << 16) + div100;
   uint32_t tens = (hundreds * kDivisionBy10Mul) / kDivisionBy10Div;
   tens &= (0xFull << 16) | 0xFull;
-  tens = (tens << (reversed ? 8 : 0)) +
-         static_cast<uint32_t>((hundreds - 10ull * tens) << (reversed ? 0 : 8));
-  return tens;
-}
-inline uint32_t PrepareFourDigits(uint32_t n) {
-  return PrepareFourDigitsImpl(n, false);
-}
-inline uint32_t PrepareFourDigitsReversed(uint32_t n) {
-  return PrepareFourDigitsImpl(n, true);
+  tens += (hundreds - 10ull * tens) << 8;
+  ABSL_ASSUME(tens != 0);
+  // The result can contain trailing zero bytes. We need to strip them so the
+  // output starts at the first significant byte. For example, for n = 123,
+  // `tens` holds the byte representation \0\1\2\3. We use countr_zero to
+  // count the trailing zero bits and `& -8` to round that count down to a
+  // multiple of 8, so that we strip whole zero bytes rather than zero bits.
+  // We spell it "0 - 8u" instead of "-8" to keep MSVC happy.
+  uint32_t zeroes = static_cast<uint32_t>(absl::countr_zero(tens)) & (0 - 8u);
+  tens += kFourZeroBytes;
+  tens >>= zeroes;
+  little_endian::Store32(out_str, tens);
+  return out_str + sizeof(tens) - zeroes / 8;
 }
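
After the two interleaved divisions, `tens` holds one decimal digit per byte with the most significant digit in the lowest byte, so a single little-endian 32-bit store writes the digits in reading order; countr_zero, rounded down to a multiple of 8, counts the leading zero digits that get shifted out so the returned length covers only significant digits. A standalone sketch that reproduces the byte layout for n = 123 using the same constants (illustrative, checked with plain shifts rather than a store):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Reproduce the digit layout EncodeTenThousand builds for n = 123.
      uint32_t n = 123;
      uint32_t div100 = (n * 10486u) >> 20;         // 1   (upper two digits)
      uint32_t mod100 = n - 100u * div100;          // 23  (lower two digits)
      uint32_t hundreds = (mod100 << 16) + div100;  // 16-bit halves [23 | 1]
      uint32_t tens = (hundreds * 103u) >> 10;
      tens &= (0xFu << 16) | 0xFu;                  // per-half division by 10
      tens += (hundreds - 10u * tens) << 8;         // now one digit per byte
      // Bytes from least to most significant: 0, 1, 2, 3 (thousands,
      // hundreds, tens, units), so a little-endian store emits "0123"; the
      // leading zero byte is exactly what the countr_zero step strips.
      assert(((tens >> 0) & 0xFFu) == 0);
      assert(((tens >> 8) & 0xFFu) == 1);
      assert(((tens >> 16) & 0xFFu) == 2);
      assert(((tens >> 24) & 0xFFu) == 3);
      std::puts("layout verified");
    }
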
 
 // Helper function to produce an ASCII representation of `i`.
@@ -259,309 +216,126 @@
 //  // Note two leading zeros:
 //  EXPECT_EQ(absl::string_view(ascii, 8), "00102030");
 //
-// If `Reversed` is set to true, the result becomes reversed to "03020100".
-//
 // Pre-condition: `i` must be less than 100000000.
-inline uint64_t PrepareEightDigitsImpl(uint32_t i, bool reversed) {
+inline uint64_t PrepareEightDigits(uint32_t i) {
   ABSL_ASSUME(i < 10000'0000);
   // Prepare 2 blocks of 4 digits "in parallel".
   uint32_t hi = i / 10000;
   uint32_t lo = i % 10000;
-  uint64_t merged = (uint64_t{hi} << (reversed ? 32 : 0)) |
-                    (uint64_t{lo} << (reversed ? 0 : 32));
+  uint64_t merged = hi | (uint64_t{lo} << 32);
   uint64_t div100 = ((merged * kDivisionBy100Mul) / kDivisionBy100Div) &
                     ((0x7Full << 32) | 0x7Full);
   uint64_t mod100 = merged - 100ull * div100;
-  uint64_t hundreds =
-      (mod100 << (reversed ? 0 : 16)) + (div100 << (reversed ? 16 : 0));
+  uint64_t hundreds = (mod100 << 16) + div100;
   uint64_t tens = (hundreds * kDivisionBy10Mul) / kDivisionBy10Div;
   tens &= (0xFull << 48) | (0xFull << 32) | (0xFull << 16) | 0xFull;
-  tens = (tens << (reversed ? 8 : 0)) +
-         ((hundreds - 10ull * tens) << (reversed ? 0 : 8));
+  tens += (hundreds - 10ull * tens) << 8;
   return tens;
 }
-inline uint64_t PrepareEightDigits(uint32_t i) {
-  return PrepareEightDigitsImpl(i, false);
-}
-inline uint64_t PrepareEightDigitsReversed(uint32_t i) {
-  return PrepareEightDigitsImpl(i, true);
+
+inline ABSL_ATTRIBUTE_ALWAYS_INLINE absl::Nonnull<char*> EncodeFullU32(
+    uint32_t n, absl::Nonnull<char*> out_str) {
+  if (n < 10) {
+    *out_str = static_cast<char>('0' + n);
+    return out_str + 1;
+  }
+  if (n < 100'000'000) {
+    uint64_t bottom = PrepareEightDigits(n);
+    ABSL_ASSUME(bottom != 0);
+    // 0 minus 8 to make MSVC happy.
+    uint32_t zeroes =
+        static_cast<uint32_t>(absl::countr_zero(bottom)) & (0 - 8u);
+    little_endian::Store64(out_str, (bottom + kEightZeroBytes) >> zeroes);
+    return out_str + sizeof(bottom) - zeroes / 8;
+  }
+  uint32_t div08 = n / 100'000'000;
+  uint32_t mod08 = n % 100'000'000;
+  uint64_t bottom = PrepareEightDigits(mod08) + kEightZeroBytes;
+  out_str = EncodeHundred(div08, out_str);
+  little_endian::Store64(out_str, bottom);
+  return out_str + sizeof(bottom);
 }
 
-template <typename T, typename BackwardIt>
-class FastUIntToStringConverter {
-  static_assert(
-      std::is_same<T, decltype(+std::declval<T>())>::value,
-      "to avoid code bloat, only instantiate this for int and larger types");
-  static_assert(std::is_unsigned<T>::value,
-                "this class is only for unsigned types");
-
- public:
-  // Outputs the given number backward (like with std::copy_backward),
-  // starting from the end of the string.
-  // The number of digits in the number must have been already measured and
-  // passed *exactly*, otherwise the behavior is undefined.
-  // (This is an optimization, as calculating the number of digits again would
-  // slow down the hot path.)
-  // Returns an iterator to the start of the suffix that was appended.
-  static BackwardIt FastIntToBufferBackward(T v, BackwardIt end) {
-    // THIS IS A HOT FUNCTION with a very deliberate structure to exploit branch
-    // prediction and shorten the critical path for smaller numbers.
-    // Do not move around the if/else blocks or attempt to simplify it
-    // without benchmarking any changes.
-
-    if (v < 10) {
-      goto AT_LEAST_1 /* NOTE: mandatory for the 0 case */;
-    }
-    if (v < 1000) {
-      goto AT_LEAST_10;
-    }
-    if (v < 10000000) {
-      goto AT_LEAST_1000;
-    }
-
-    if (v >= 100000000 / 10) {
-      if (v >= 10000000000000000 / 10) {
-        DoFastIntToBufferBackward<8>(v, end);
-      }
-      DoFastIntToBufferBackward<8>(v, end);
-    }
-
-    if (v >= 10000 / 10) {
-    AT_LEAST_1000:
-      DoFastIntToBufferBackward<4>(v, end);
-    }
-
-    if (v >= 100 / 10) {
-    AT_LEAST_10:
-      DoFastIntToBufferBackward<2>(v, end);
-    }
-
-    if (v >= 10 / 10) {
-    AT_LEAST_1:
-      end = DoFastIntToBufferBackward(v, end, std::integral_constant<int, 1>());
-    }
-    return end;
+inline ABSL_ATTRIBUTE_ALWAYS_INLINE char* EncodeFullU64(uint64_t i,
+                                                        char* buffer) {
+  if (i <= std::numeric_limits<uint32_t>::max()) {
+    return EncodeFullU32(static_cast<uint32_t>(i), buffer);
   }
-
- private:
-  // Only assume pointers are contiguous for now. String and vector iterators
-  // could be special-cased as well, but there's no need for them here.
-  // With C++20 we can probably switch to std::contiguous_iterator_tag.
-  static constexpr bool kIsContiguousIterator =
-      std::is_pointer<BackwardIt>::value;
-
-  template <int Exponent>
-  static void DoFastIntToBufferBackward(T& v, BackwardIt& end) {
-    constexpr T kModulus = Pow<T>(10, Exponent);
-    T remainder = static_cast<T>(v % kModulus);
-    v = static_cast<T>(v / kModulus);
-    end = DoFastIntToBufferBackward(remainder, end,
-                                    std::integral_constant<int, Exponent>());
+  uint32_t mod08;
+  if (i < 1'0000'0000'0000'0000ull) {
+    uint32_t div08 = static_cast<uint32_t>(i / 100'000'000ull);
+    mod08 = static_cast<uint32_t>(i % 100'000'000ull);
+    buffer = EncodeFullU32(div08, buffer);
+  } else {
+    uint64_t div08 = i / 100'000'000ull;
+    mod08 = static_cast<uint32_t>(i % 100'000'000ull);
+    uint32_t div016 = static_cast<uint32_t>(div08 / 100'000'000ull);
+    uint32_t div08mod08 = static_cast<uint32_t>(div08 % 100'000'000ull);
+    uint64_t mid_result = PrepareEightDigits(div08mod08) + kEightZeroBytes;
+    buffer = EncodeTenThousand(div016, buffer);
+    little_endian::Store64(buffer, mid_result);
+    buffer += sizeof(mid_result);
   }
-
-  static BackwardIt DoFastIntToBufferBackward(const T&, BackwardIt end,
-                                              std::integral_constant<int, 0>) {
-    return end;
-  }
-
-  static BackwardIt DoFastIntToBufferBackward(T v, BackwardIt end,
-                                              std::integral_constant<int, 1>) {
-    *--end = static_cast<char>('0' + v);
-    return DoFastIntToBufferBackward(v, end, std::integral_constant<int, 0>());
-  }
-
-  static BackwardIt DoFastIntToBufferBackward(T v, BackwardIt end,
-                                              std::integral_constant<int, 4>) {
-    if (kIsContiguousIterator) {
-      const uint32_t digits =
-          PrepareFourDigits(static_cast<uint32_t>(v)) + kFourZeroBytes;
-      end -= sizeof(digits);
-      little_endian::Store32(&*end, digits);
-    } else {
-      uint32_t digits =
-          PrepareFourDigitsReversed(static_cast<uint32_t>(v)) + kFourZeroBytes;
-      for (size_t i = 0; i < sizeof(digits); ++i) {
-        *--end = static_cast<char>(digits);
-        digits >>= CHAR_BIT;
-      }
-    }
-    return end;
-  }
-
-  static BackwardIt DoFastIntToBufferBackward(T v, BackwardIt end,
-                                              std::integral_constant<int, 8>) {
-    if (kIsContiguousIterator) {
-      const uint64_t digits =
-          PrepareEightDigits(static_cast<uint32_t>(v)) + kEightZeroBytes;
-      end -= sizeof(digits);
-      little_endian::Store64(&*end, digits);
-    } else {
-      uint64_t digits = PrepareEightDigitsReversed(static_cast<uint32_t>(v)) +
-                        kEightZeroBytes;
-      for (size_t i = 0; i < sizeof(digits); ++i) {
-        *--end = static_cast<char>(digits);
-        digits >>= CHAR_BIT;
-      }
-    }
-    return end;
-  }
-
-  template <int Digits>
-  static BackwardIt DoFastIntToBufferBackward(
-      T v, BackwardIt end, std::integral_constant<int, Digits>) {
-    constexpr int kLogModulus = Digits - Digits / 2;
-    constexpr T kModulus = Pow(static_cast<T>(10), kLogModulus);
-    bool is_safe_to_use_division_trick = Digits <= 8;
-    T quotient, remainder;
-    if (is_safe_to_use_division_trick) {
-      constexpr uint64_t kCoefficient =
-          ComputePowerOf100DivisionCoefficient<uint64_t>(kLogModulus);
-      quotient = (v * kCoefficient) >> (10 * kLogModulus);
-      remainder = v - quotient * kModulus;
-    } else {
-      quotient = v / kModulus;
-      remainder = v % kModulus;
-    }
-    end = DoFastIntToBufferBackward(remainder, end,
-                                    std::integral_constant<int, kLogModulus>());
-    return DoFastIntToBufferBackward(
-        quotient, end, std::integral_constant<int, Digits - kLogModulus>());
-  }
-};
-
-// Returns an iterator to the start of the suffix that was appended
-template <typename T, typename BackwardIt>
-std::enable_if_t<std::is_unsigned<T>::value, BackwardIt>
-DoFastIntToBufferBackward(T v, BackwardIt end, uint32_t digits) {
-  using PromotedT = std::decay_t<decltype(+v)>;
-  using Converter = FastUIntToStringConverter<PromotedT, BackwardIt>;
-  (void)digits;
-  return Converter().FastIntToBufferBackward(v, end);
-}
-
-template <typename T, typename BackwardIt>
-std::enable_if_t<std::is_signed<T>::value, BackwardIt>
-DoFastIntToBufferBackward(T v, BackwardIt end, uint32_t digits) {
-  if (absl::numbers_internal::IsNegative(v)) {
-    // Store the minus sign *before* we produce the number itself, not after.
-    // This gets us a tail call.
-    end[-static_cast<ptrdiff_t>(digits) - 1] = '-';
-  }
-  return DoFastIntToBufferBackward(
-      absl::numbers_internal::UnsignedAbsoluteValue(v), end, digits);
-}
-
-template <class T>
-std::enable_if_t<std::is_integral<T>::value, int>
-GetNumDigitsOrNegativeIfNegativeImpl(T v) {
-  const auto /* either bool or std::false_type */ is_negative =
-      absl::numbers_internal::IsNegative(v);
-  const int digits = static_cast<int>(absl::numbers_internal::Base10Digits(
-      absl::numbers_internal::UnsignedAbsoluteValue(v)));
-  return is_negative ? ~digits : digits;
+  uint64_t mod_result = PrepareEightDigits(mod08) + kEightZeroBytes;
+  little_endian::Store64(buffer, mod_result);
+  return buffer + sizeof(mod_result);
 }
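
EncodeFullU64 peels the value into 8-digit decimal chunks so each chunk can go through the 32-bit fast path: the low chunk always, plus a middle chunk and a small leading chunk only when the value is large enough. A standalone sketch of the same split for one value, checked against ordinary decimal formatting (illustrative, not Abseil code):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>
    #include <string>

    int main() {
      uint64_t i = 1234567890123456789ull;  // large enough to need all chunks
      uint64_t div08 = i / 100'000'000ull;
      uint32_t mod08 = static_cast<uint32_t>(i % 100'000'000ull);
      uint32_t div016 = static_cast<uint32_t>(div08 / 100'000'000ull);
      uint32_t div08mod08 = static_cast<uint32_t>(div08 % 100'000'000ull);
      // Chunks after the first are zero-padded to 8 digits, exactly like the
      // PrepareEightDigits output above.
      char buf[32];
      std::snprintf(buf, sizeof(buf), "%u%08u%08u", div016, div08mod08, mod08);
      assert(std::string(buf) == "1234567890123456789");
      std::puts(buf);
    }
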
 
 }  // namespace
 
 void numbers_internal::PutTwoDigits(uint32_t i, absl::Nonnull<char*> buf) {
-  little_endian::Store16(
-      buf, static_cast<uint16_t>(PrepareTwoDigits(i) + kTwoZeroBytes));
+  assert(i < 100);
+  uint32_t base = kTwoZeroBytes;
+  uint32_t div10 = (i * kDivisionBy10Mul) / kDivisionBy10Div;
+  uint32_t mod10 = i - 10u * div10;
+  base += div10 + (mod10 << 8);
+  little_endian::Store16(buf, static_cast<uint16_t>(base));
 }
 
 absl::Nonnull<char*> numbers_internal::FastIntToBuffer(
-    uint32_t i, absl::Nonnull<char*> buffer) {
-  const uint32_t digits = absl::numbers_internal::Base10Digits(i);
-  buffer += digits;
-  *buffer = '\0';  // We're going backward, so store this first
-  FastIntToBufferBackward(i, buffer, digits);
-  return buffer;
+    uint32_t n, absl::Nonnull<char*> out_str) {
+  out_str = EncodeFullU32(n, out_str);
+  *out_str = '\0';
+  return out_str;
 }
 
 absl::Nonnull<char*> numbers_internal::FastIntToBuffer(
     int32_t i, absl::Nonnull<char*> buffer) {
-  buffer += static_cast<int>(i < 0);
-  uint32_t digits = absl::numbers_internal::Base10Digits(
-      absl::numbers_internal::UnsignedAbsoluteValue(i));
-  buffer += digits;
-  *buffer = '\0';  // We're going backward, so store this first
-  FastIntToBufferBackward(i, buffer, digits);
+  uint32_t u = static_cast<uint32_t>(i);
+  if (i < 0) {
+    *buffer++ = '-';
+    // We need to do the negation in modular (i.e., "unsigned")
+    // arithmetic; MSVC++ apparently warns for plain "-u", so
+    // we write the equivalent expression "0 - u" instead.
+    u = 0 - u;
+  }
+  buffer = EncodeFullU32(u, buffer);
+  *buffer = '\0';
   return buffer;
 }
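
The signed overloads print the '-' first and then negate in unsigned arithmetic; "0 - u" is well defined even for the most negative value, where negating the signed value itself would overflow. A tiny check of that corner case:

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t i = INT32_MIN;                  // -2147483648
      uint32_t u = static_cast<uint32_t>(i);
      u = 0 - u;                              // modular negation, no UB
      assert(u == 2147483648u);
    }
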
 
 absl::Nonnull<char*> numbers_internal::FastIntToBuffer(
     uint64_t i, absl::Nonnull<char*> buffer) {
-  uint32_t digits = absl::numbers_internal::Base10Digits(i);
-  buffer += digits;
-  *buffer = '\0';  // We're going backward, so store this first
-  FastIntToBufferBackward(i, buffer, digits);
+  buffer = EncodeFullU64(i, buffer);
+  *buffer = '\0';
   return buffer;
 }
 
 absl::Nonnull<char*> numbers_internal::FastIntToBuffer(
     int64_t i, absl::Nonnull<char*> buffer) {
-  buffer += static_cast<int>(i < 0);
-  uint32_t digits = absl::numbers_internal::Base10Digits(
-      absl::numbers_internal::UnsignedAbsoluteValue(i));
-  buffer += digits;
-  *buffer = '\0';  // We're going backward, so store this first
-  FastIntToBufferBackward(i, buffer, digits);
+  uint64_t u = static_cast<uint64_t>(i);
+  if (i < 0) {
+    *buffer++ = '-';
+    // We need to do the negation in modular (i.e., "unsigned")
+    // arithmetic; MSVC++ apparently warns for plain "-u", so
+    // we write the equivalent expression "0 - u" instead.
+    u = 0 - u;
+  }
+  buffer = EncodeFullU64(u, buffer);
+  *buffer = '\0';
   return buffer;
 }
 
-absl::Nonnull<char*> numbers_internal::FastIntToBufferBackward(
-    uint32_t i, absl::Nonnull<char*> buffer_end, uint32_t exact_digit_count) {
-  return DoFastIntToBufferBackward(i, buffer_end, exact_digit_count);
-}
-
-absl::Nonnull<char*> numbers_internal::FastIntToBufferBackward(
-    int32_t i, absl::Nonnull<char*> buffer_end, uint32_t exact_digit_count) {
-  return DoFastIntToBufferBackward(i, buffer_end, exact_digit_count);
-}
-
-absl::Nonnull<char*> numbers_internal::FastIntToBufferBackward(
-    uint64_t i, absl::Nonnull<char*> buffer_end, uint32_t exact_digit_count) {
-  return DoFastIntToBufferBackward(i, buffer_end, exact_digit_count);
-}
-
-absl::Nonnull<char*> numbers_internal::FastIntToBufferBackward(
-    int64_t i, absl::Nonnull<char*> buffer_end, uint32_t exact_digit_count) {
-  return DoFastIntToBufferBackward(i, buffer_end, exact_digit_count);
-}
-
-int numbers_internal::GetNumDigitsOrNegativeIfNegative(signed char v) {
-  return GetNumDigitsOrNegativeIfNegativeImpl(v);
-}
-int numbers_internal::GetNumDigitsOrNegativeIfNegative(unsigned char v) {
-  return GetNumDigitsOrNegativeIfNegativeImpl(v);
-}
-int numbers_internal::GetNumDigitsOrNegativeIfNegative(short v) {  // NOLINT
-  return GetNumDigitsOrNegativeIfNegativeImpl(v);
-}
-int numbers_internal::GetNumDigitsOrNegativeIfNegative(
-    unsigned short v) {  // NOLINT
-  return GetNumDigitsOrNegativeIfNegativeImpl(v);
-}
-int numbers_internal::GetNumDigitsOrNegativeIfNegative(int v) {
-  return GetNumDigitsOrNegativeIfNegativeImpl(v);
-}
-int numbers_internal::GetNumDigitsOrNegativeIfNegative(unsigned int v) {
-  return GetNumDigitsOrNegativeIfNegativeImpl(v);
-}
-int numbers_internal::GetNumDigitsOrNegativeIfNegative(long v) {  // NOLINT
-  return GetNumDigitsOrNegativeIfNegativeImpl(v);
-}
-int numbers_internal::GetNumDigitsOrNegativeIfNegative(
-    unsigned long v) {  // NOLINT
-  return GetNumDigitsOrNegativeIfNegativeImpl(v);
-}
-int numbers_internal::GetNumDigitsOrNegativeIfNegative(long long v) {  // NOLINT
-  return GetNumDigitsOrNegativeIfNegativeImpl(v);
-}
-int numbers_internal::GetNumDigitsOrNegativeIfNegative(
-    unsigned long long v) {  // NOLINT
-  return GetNumDigitsOrNegativeIfNegativeImpl(v);
-}
-
 // Given a 128-bit number expressed as a pair of uint64_t, high half first,
 // return that number multiplied by the given 32-bit value.  If the result is
 // too large to fit in a 128-bit number, divide it by 2 until it fits.
diff --git a/absl/strings/numbers.h b/absl/strings/numbers.h
index ad4e66b..739dbb2 100644
--- a/absl/strings/numbers.h
+++ b/absl/strings/numbers.h
@@ -32,7 +32,6 @@
 #endif
 
 #include <cstddef>
-#include <cstdint>
 #include <cstdlib>
 #include <cstring>
 #include <ctime>
@@ -40,12 +39,10 @@
 #include <string>
 #include <type_traits>
 
-#include "absl/base/attributes.h"
 #include "absl/base/config.h"
 #include "absl/base/internal/endian.h"
 #include "absl/base/macros.h"
 #include "absl/base/nullability.h"
-#include "absl/base/optimization.h"
 #include "absl/base/port.h"
 #include "absl/numeric/bits.h"
 #include "absl/numeric/int128.h"
@@ -161,96 +158,6 @@
 static const int kFastToBufferSize = 32;
 static const int kSixDigitsToBufferSize = 16;
 
-template <class T>
-std::enable_if_t<!std::is_unsigned<T>::value, bool> IsNegative(const T& v) {
-  return v < T();
-}
-
-template <class T>
-std::enable_if_t<std::is_unsigned<T>::value, std::false_type> IsNegative(
-    const T&) {
-  // The integer is unsigned, so return a compile-time constant.
-  // This can help the optimizer avoid having to prove bool to be false later.
-  return std::false_type();
-}
-
-template <class T>
-std::enable_if_t<std::is_unsigned<std::decay_t<T>>::value, T&&>
-UnsignedAbsoluteValue(T&& v ABSL_ATTRIBUTE_LIFETIME_BOUND) {
-  // The value is unsigned; just return the original.
-  return std::forward<T>(v);
-}
-
-template <class T>
-ABSL_ATTRIBUTE_CONST_FUNCTION
-    std::enable_if_t<!std::is_unsigned<T>::value, std::make_unsigned_t<T>>
-    UnsignedAbsoluteValue(T v) {
-  using U = std::make_unsigned_t<T>;
-  return IsNegative(v) ? U() - static_cast<U>(v) : static_cast<U>(v);
-}
-
-// Returns the number of base-10 digits in the given number.
-// Note that this strictly counts digits. It does not count the sign.
-// The `initial_digits` parameter is the starting point, which is normally equal
-// to 1 because the number of digits in 0 is 1 (a special case).
-// However, callers may e.g. wish to change it to 2 to account for the sign.
-template <typename T>
-std::enable_if_t<std::is_unsigned<T>::value, uint32_t> Base10Digits(
-    T v, const uint32_t initial_digits = 1) {
-  uint32_t r = initial_digits;
-  // If code size becomes an issue, the 'if' stage can be removed for a minor
-  // performance loss.
-  for (;;) {
-    if (ABSL_PREDICT_TRUE(v < 10 * 10)) {
-      r += (v >= 10);
-      break;
-    }
-    if (ABSL_PREDICT_TRUE(v < 1000 * 10)) {
-      r += (v >= 1000) + 2;
-      break;
-    }
-    if (ABSL_PREDICT_TRUE(v < 100000 * 10)) {
-      r += (v >= 100000) + 4;
-      break;
-    }
-    r += 6;
-    v = static_cast<T>(v / 1000000);
-  }
-  return r;
-}
-
-template <typename T>
-std::enable_if_t<std::is_signed<T>::value, uint32_t> Base10Digits(
-    T v, uint32_t r = 1) {
-  // Branchlessly add 1 to account for a minus sign.
-  r += static_cast<uint32_t>(IsNegative(v));
-  return Base10Digits(UnsignedAbsoluteValue(v), r);
-}
-
-// These functions return the number of base-10 digits, but multiplied by -1 if
-// the input itself is negative. This is handy and efficient for later usage,
-// since the bitwise complement of the result becomes equal to the number of
-// characters required.
-ABSL_ATTRIBUTE_CONST_FUNCTION int GetNumDigitsOrNegativeIfNegative(
-    signed char v);
-ABSL_ATTRIBUTE_CONST_FUNCTION int GetNumDigitsOrNegativeIfNegative(
-    unsigned char v);
-ABSL_ATTRIBUTE_CONST_FUNCTION int GetNumDigitsOrNegativeIfNegative(
-    short v);  // NOLINT
-ABSL_ATTRIBUTE_CONST_FUNCTION int GetNumDigitsOrNegativeIfNegative(
-    unsigned short v);  // NOLINT
-ABSL_ATTRIBUTE_CONST_FUNCTION int GetNumDigitsOrNegativeIfNegative(int v);
-ABSL_ATTRIBUTE_CONST_FUNCTION int GetNumDigitsOrNegativeIfNegative(
-    unsigned int v);
-ABSL_ATTRIBUTE_CONST_FUNCTION int GetNumDigitsOrNegativeIfNegative(
-    long v);  // NOLINT
-ABSL_ATTRIBUTE_CONST_FUNCTION int GetNumDigitsOrNegativeIfNegative(
-    unsigned long v);  // NOLINT
-ABSL_ATTRIBUTE_CONST_FUNCTION int GetNumDigitsOrNegativeIfNegative(
-    long long v);  // NOLINT
-ABSL_ATTRIBUTE_CONST_FUNCTION int GetNumDigitsOrNegativeIfNegative(
-    unsigned long long v);  // NOLINT
-
 // Helper function for fast formatting of floating-point values.
 // The result is the same as printf's "%g", a.k.a. "%.6g"; that is, six
 // significant digits are returned, trailing zeros are removed, and numbers
@@ -259,18 +166,24 @@
 // Required buffer size is `kSixDigitsToBufferSize`.
 size_t SixDigitsToBuffer(double d, absl::Nonnull<char*> buffer);
 
-// All of these functions take an output buffer
+// WARNING: These functions may write more characters than necessary, because
+// they are intended for speed. All functions take an output buffer
 // as an argument and return a pointer to the last byte they wrote, which is the
 // terminating '\0'. At most `kFastToBufferSize` bytes are written.
-absl::Nonnull<char*> FastIntToBuffer(int32_t i, absl::Nonnull<char*> buffer);
-absl::Nonnull<char*> FastIntToBuffer(uint32_t i, absl::Nonnull<char*> buffer);
-absl::Nonnull<char*> FastIntToBuffer(int64_t i, absl::Nonnull<char*> buffer);
-absl::Nonnull<char*> FastIntToBuffer(uint64_t i, absl::Nonnull<char*> buffer);
+absl::Nonnull<char*> FastIntToBuffer(int32_t i, absl::Nonnull<char*> buffer)
+    ABSL_INTERNAL_NEED_MIN_SIZE(buffer, kFastToBufferSize);
+absl::Nonnull<char*> FastIntToBuffer(uint32_t n, absl::Nonnull<char*> out_str)
+    ABSL_INTERNAL_NEED_MIN_SIZE(out_str, kFastToBufferSize);
+absl::Nonnull<char*> FastIntToBuffer(int64_t i, absl::Nonnull<char*> buffer)
+    ABSL_INTERNAL_NEED_MIN_SIZE(buffer, kFastToBufferSize);
+absl::Nonnull<char*> FastIntToBuffer(uint64_t i, absl::Nonnull<char*> buffer)
+    ABSL_INTERNAL_NEED_MIN_SIZE(buffer, kFastToBufferSize);
 
 // For enums and integer types that are not an exact match for the types above,
 // use templates to call the appropriate one of the four overloads above.
 template <typename int_type>
-absl::Nonnull<char*> FastIntToBuffer(int_type i, absl::Nonnull<char*> buffer) {
+absl::Nonnull<char*> FastIntToBuffer(int_type i, absl::Nonnull<char*> buffer)
+    ABSL_INTERNAL_NEED_MIN_SIZE(buffer, kFastToBufferSize) {
   static_assert(sizeof(i) <= 64 / 8,
                 "FastIntToBuffer works only with 64-bit-or-less integers.");
   // TODO(jorg): This signed-ness check is used because it works correctly
@@ -294,58 +207,6 @@
   }
 }
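
With the backward variants removed below, callers are left with the forward FastIntToBuffer overloads, which now carry ABSL_INTERNAL_NEED_MIN_SIZE annotations and, per the warning above, may write more bytes than the formatted number needs. A minimal usage sketch under that contract (this is an internal API, normally reached via StrCat/StrAppend; shown only to illustrate the buffer requirement):

    #include <cstdint>
    #include <iostream>
    #include <string_view>

    #include "absl/strings/numbers.h"

    int main() {
      // The buffer must be kFastToBufferSize (32) bytes even for short
      // numbers, because the encoder may scribble past the terminating '\0'.
      char buf[absl::numbers_internal::kFastToBufferSize];
      char* end = absl::numbers_internal::FastIntToBuffer(int64_t{-12345}, buf);
      std::cout << std::string_view(buf, static_cast<size_t>(end - buf))
                << "\n";  // prints -12345
    }
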
 
-// These functions do NOT add any null-terminator.
-// They return a pointer to the beginning of the written string.
-// The digit counts provided must *exactly* match the number of base-10 digits
-// in the number, or the behavior is undefined.
-// (i.e. do NOT count the minus sign, or over- or under-count the digits.)
-absl::Nonnull<char*> FastIntToBufferBackward(int32_t i,
-                                             absl::Nonnull<char*> buffer_end,
-                                             uint32_t exact_digit_count);
-absl::Nonnull<char*> FastIntToBufferBackward(uint32_t i,
-                                             absl::Nonnull<char*> buffer_end,
-                                             uint32_t exact_digit_count);
-absl::Nonnull<char*> FastIntToBufferBackward(int64_t i,
-                                             absl::Nonnull<char*> buffer_end,
-                                             uint32_t exact_digit_count);
-absl::Nonnull<char*> FastIntToBufferBackward(uint64_t i,
-                                             absl::Nonnull<char*> buffer_end,
-                                             uint32_t exact_digit_count);
-
-// For enums and integer types that are not an exact match for the types above,
-// use templates to call the appropriate one of the four overloads above.
-template <typename int_type>
-absl::Nonnull<char*> FastIntToBufferBackward(int_type i,
-                                             absl::Nonnull<char*> buffer_end,
-                                             uint32_t exact_digit_count) {
-  static_assert(
-      sizeof(i) <= 64 / 8,
-      "FastIntToBufferBackward works only with 64-bit-or-less integers.");
-  // This signed-ness check is used because it works correctly
-  // with enums, and it also serves to check that int_type is not a pointer.
-  // If one day something like std::is_signed<enum E> works, switch to it.
-  // These conditions are constexpr bools to suppress MSVC warning C4127.
-  constexpr bool kIsSigned = static_cast<int_type>(1) - 2 < 0;
-  constexpr bool kUse64Bit = sizeof(i) > 32 / 8;
-  if (kIsSigned) {
-    if (kUse64Bit) {
-      return FastIntToBufferBackward(static_cast<int64_t>(i), buffer_end,
-                                     exact_digit_count);
-    } else {
-      return FastIntToBufferBackward(static_cast<int32_t>(i), buffer_end,
-                                     exact_digit_count);
-    }
-  } else {
-    if (kUse64Bit) {
-      return FastIntToBufferBackward(static_cast<uint64_t>(i), buffer_end,
-                                     exact_digit_count);
-    } else {
-      return FastIntToBufferBackward(static_cast<uint32_t>(i), buffer_end,
-                                     exact_digit_count);
-    }
-  }
-}
-
 // Implementation of SimpleAtoi, generalized to support arbitrary base (used
 // with base different from 10 elsewhere in Abseil implementation).
 template <typename int_type>
diff --git a/absl/strings/numbers_test.cc b/absl/strings/numbers_test.cc
index 1ceff70..75c2dcf 100644
--- a/absl/strings/numbers_test.cc
+++ b/absl/strings/numbers_test.cc
@@ -231,15 +231,10 @@
   CheckInt32(INT_MIN);
   CheckInt32(INT_MAX);
   CheckInt64(LONG_MIN);
-  CheckInt64(uint64_t{10000000});
-  CheckInt64(uint64_t{100000000});
   CheckInt64(uint64_t{1000000000});
   CheckInt64(uint64_t{9999999999});
   CheckInt64(uint64_t{100000000000000});
   CheckInt64(uint64_t{999999999999999});
-  CheckInt64(uint64_t{1000000000000000});
-  CheckInt64(uint64_t{10000000000000000});
-  CheckInt64(uint64_t{100000000000000000});
   CheckInt64(uint64_t{1000000000000000000});
   CheckInt64(uint64_t{1199999999999999999});
   CheckInt64(int64_t{-700000000000000000});
@@ -251,8 +246,6 @@
   CheckUInt64(uint64_t{999999999999999});
   CheckUInt64(uint64_t{1000000000000000000});
   CheckUInt64(uint64_t{1199999999999999999});
-  CheckUInt64(uint64_t{10000000000000000000u});
-  CheckUInt64(uint64_t{10200300040000500006u});
   CheckUInt64(std::numeric_limits<uint64_t>::max());
 
   for (int i = 0; i < 10000; i++) {
diff --git a/absl/strings/str_cat.cc b/absl/strings/str_cat.cc
index 098ab18..c51c137 100644
--- a/absl/strings/str_cat.cc
+++ b/absl/strings/str_cat.cc
@@ -20,19 +20,18 @@
 #include <cstdint>
 #include <cstring>
 #include <initializer_list>
+#include <limits>
 #include <string>
-#include <type_traits>
 
 #include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
 #include "absl/base/nullability.h"
 #include "absl/strings/internal/resize_uninitialized.h"
-#include "absl/strings/numbers.h"
 #include "absl/strings/string_view.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 
-
 // ----------------------------------------------------------------------
 // StrCat()
 //    This merges the given strings or integers, with no delimiter. This
@@ -43,7 +42,8 @@
 namespace {
 // Append is merely a version of memcpy that returns the address of the byte
 // after the area just overwritten.
-absl::Nonnull<char*> Append(absl::Nonnull<char*> out, const AlphaNum& x) {
+inline absl::Nonnull<char*> Append(absl::Nonnull<char*> out,
+                                   const AlphaNum& x) {
   // memcpy is allowed to overwrite arbitrary memory, so doing this after the
   // call would force an extra fetch of x.size().
   char* after = out + x.size();
@@ -53,12 +53,23 @@
   return after;
 }
 
+inline void STLStringAppendUninitializedAmortized(std::string* dest,
+                                                  size_t to_append) {
+  strings_internal::AppendUninitializedTraits<std::string>::Append(dest,
+                                                                   to_append);
+}
 }  // namespace
 
 std::string StrCat(const AlphaNum& a, const AlphaNum& b) {
   std::string result;
-  absl::strings_internal::STLStringResizeUninitialized(&result,
-                                                       a.size() + b.size());
+  // Use uint64_t to prevent size_t overflow. We assume it is not possible for
+  // in memory strings to overflow a uint64_t.
+  constexpr uint64_t kMaxSize = uint64_t{std::numeric_limits<size_t>::max()};
+  const uint64_t result_size =
+      static_cast<uint64_t>(a.size()) + static_cast<uint64_t>(b.size());
+  ABSL_INTERNAL_CHECK(result_size <= kMaxSize, "size_t overflow");
+  absl::strings_internal::STLStringResizeUninitialized(
+      &result, static_cast<size_t>(result_size));
   char* const begin = &result[0];
   char* out = begin;
   out = Append(out, a);
@@ -69,8 +80,15 @@
 
 std::string StrCat(const AlphaNum& a, const AlphaNum& b, const AlphaNum& c) {
   std::string result;
+  // Use uint64_t to prevent size_t overflow. We assume it is not possible for
+  // in memory strings to overflow a uint64_t.
+  constexpr uint64_t kMaxSize = uint64_t{std::numeric_limits<size_t>::max()};
+  const uint64_t result_size = static_cast<uint64_t>(a.size()) +
+                               static_cast<uint64_t>(b.size()) +
+                               static_cast<uint64_t>(c.size());
+  ABSL_INTERNAL_CHECK(result_size <= kMaxSize, "size_t overflow");
   strings_internal::STLStringResizeUninitialized(
-      &result, a.size() + b.size() + c.size());
+      &result, static_cast<size_t>(result_size));
   char* const begin = &result[0];
   char* out = begin;
   out = Append(out, a);
@@ -83,8 +101,16 @@
 std::string StrCat(const AlphaNum& a, const AlphaNum& b, const AlphaNum& c,
                    const AlphaNum& d) {
   std::string result;
+  // Use uint64_t to prevent size_t overflow. We assume it is not possible for
+  // in memory strings to overflow a uint64_t.
+  constexpr uint64_t kMaxSize = uint64_t{std::numeric_limits<size_t>::max()};
+  const uint64_t result_size = static_cast<uint64_t>(a.size()) +
+                               static_cast<uint64_t>(b.size()) +
+                               static_cast<uint64_t>(c.size()) +
+                               static_cast<uint64_t>(d.size());
+  ABSL_INTERNAL_CHECK(result_size <= kMaxSize, "size_t overflow");
   strings_internal::STLStringResizeUninitialized(
-      &result, a.size() + b.size() + c.size() + d.size());
+      &result, static_cast<size_t>(result_size));
   char* const begin = &result[0];
   char* out = begin;
   out = Append(out, a);
@@ -98,135 +124,18 @@
 namespace strings_internal {
 
 // Do not call directly - these are not part of the public API.
-void STLStringAppendUninitializedAmortized(std::string* dest,
-                                           size_t to_append) {
-  strings_internal::AppendUninitializedTraits<std::string>::Append(dest,
-                                                                   to_append);
-}
-
-template <typename Integer>
-std::enable_if_t<std::is_integral<Integer>::value, std::string> IntegerToString(
-    Integer i) {
-  std::string str;
-  const auto /* either bool or std::false_type */ is_negative =
-      absl::numbers_internal::IsNegative(i);
-  const uint32_t digits = absl::numbers_internal::Base10Digits(
-      absl::numbers_internal::UnsignedAbsoluteValue(i));
-  absl::strings_internal::STLStringResizeUninitialized(
-      &str, digits + static_cast<uint32_t>(is_negative));
-  absl::numbers_internal::FastIntToBufferBackward(i, &str[str.size()], digits);
-  return str;
-}
-
-template <>
-std::string IntegerToString(long i) {  // NOLINT
-  if (sizeof(i) <= sizeof(int)) {
-    return IntegerToString(static_cast<int>(i));
-  } else {
-    return IntegerToString(static_cast<long long>(i));  // NOLINT
-  }
-}
-
-template <>
-std::string IntegerToString(unsigned long i) {  // NOLINT
-  if (sizeof(i) <= sizeof(unsigned int)) {
-    return IntegerToString(static_cast<unsigned int>(i));
-  } else {
-    return IntegerToString(static_cast<unsigned long long>(i));  // NOLINT
-  }
-}
-
-template <typename Float>
-std::enable_if_t<std::is_floating_point<Float>::value, std::string>
-FloatToString(Float f) {
-  std::string result;
-  strings_internal::STLStringResizeUninitialized(
-      &result, numbers_internal::kSixDigitsToBufferSize);
-  char* start = &result[0];
-  result.erase(numbers_internal::SixDigitsToBuffer(f, start));
-  return result;
-}
-
-std::string SingleArgStrCat(int x) { return IntegerToString(x); }
-std::string SingleArgStrCat(unsigned int x) { return IntegerToString(x); }
-// NOLINTNEXTLINE
-std::string SingleArgStrCat(long x) { return IntegerToString(x); }
-// NOLINTNEXTLINE
-std::string SingleArgStrCat(unsigned long x) { return IntegerToString(x); }
-// NOLINTNEXTLINE
-std::string SingleArgStrCat(long long x) { return IntegerToString(x); }
-// NOLINTNEXTLINE
-std::string SingleArgStrCat(unsigned long long x) { return IntegerToString(x); }
-std::string SingleArgStrCat(float x) { return FloatToString(x); }
-std::string SingleArgStrCat(double x) { return FloatToString(x); }
-
-template <class Integer>
-std::enable_if_t<std::is_integral<Integer>::value, void> AppendIntegerToString(
-    std::string& str, Integer i) {
-  const auto /* either bool or std::false_type */ is_negative =
-      absl::numbers_internal::IsNegative(i);
-  const uint32_t digits = absl::numbers_internal::Base10Digits(
-      absl::numbers_internal::UnsignedAbsoluteValue(i));
-  absl::strings_internal::STLStringAppendUninitializedAmortized(
-      &str, digits + static_cast<uint32_t>(is_negative));
-  absl::numbers_internal::FastIntToBufferBackward(i, &str[str.size()], digits);
-}
-
-template <>
-void AppendIntegerToString(std::string& str, long i) {  // NOLINT
-  if (sizeof(i) <= sizeof(int)) {
-    return AppendIntegerToString(str, static_cast<int>(i));
-  } else {
-    return AppendIntegerToString(str, static_cast<long long>(i));  // NOLINT
-  }
-}
-
-template <>
-void AppendIntegerToString(std::string& str,
-                           unsigned long i) {  // NOLINT
-  if (sizeof(i) <= sizeof(unsigned int)) {
-    return AppendIntegerToString(str, static_cast<unsigned int>(i));
-  } else {
-    return AppendIntegerToString(str,
-                                 static_cast<unsigned long long>(i));  // NOLINT
-  }
-}
-
-// `SingleArgStrAppend` overloads are defined here for the same reasons as with
-// `SingleArgStrCat` above.
-void SingleArgStrAppend(std::string& str, int x) {
-  return AppendIntegerToString(str, x);
-}
-
-void SingleArgStrAppend(std::string& str, unsigned int x) {
-  return AppendIntegerToString(str, x);
-}
-
-// NOLINTNEXTLINE
-void SingleArgStrAppend(std::string& str, long x) {
-  return AppendIntegerToString(str, x);
-}
-
-// NOLINTNEXTLINE
-void SingleArgStrAppend(std::string& str, unsigned long x) {
-  return AppendIntegerToString(str, x);
-}
-
-// NOLINTNEXTLINE
-void SingleArgStrAppend(std::string& str, long long x) {
-  return AppendIntegerToString(str, x);
-}
-
-// NOLINTNEXTLINE
-void SingleArgStrAppend(std::string& str, unsigned long long x) {
-  return AppendIntegerToString(str, x);
-}
-
 std::string CatPieces(std::initializer_list<absl::string_view> pieces) {
   std::string result;
-  size_t total_size = 0;
-  for (absl::string_view piece : pieces) total_size += piece.size();
-  strings_internal::STLStringResizeUninitialized(&result, total_size);
+  // Use uint64_t to prevent size_t overflow. We assume it is not possible for
+  // in memory strings to overflow a uint64_t.
+  constexpr uint64_t kMaxSize = uint64_t{std::numeric_limits<size_t>::max()};
+  uint64_t total_size = 0;
+  for (absl::string_view piece : pieces) {
+    total_size += piece.size();
+  }
+  ABSL_INTERNAL_CHECK(total_size <= kMaxSize, "size_t overflow");
+  strings_internal::STLStringResizeUninitialized(
+      &result, static_cast<size_t>(total_size));
 
   char* const begin = &result[0];
   char* out = begin;
@@ -258,7 +167,7 @@
     ASSERT_NO_OVERLAP(*dest, piece);
     to_append += piece.size();
   }
-  strings_internal::STLStringAppendUninitializedAmortized(dest, to_append);
+  STLStringAppendUninitializedAmortized(dest, to_append);
 
   char* const begin = &(*dest)[0];
   char* out = begin + old_size;
@@ -277,7 +186,7 @@
 void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a) {
   ASSERT_NO_OVERLAP(*dest, a);
   std::string::size_type old_size = dest->size();
-  strings_internal::STLStringAppendUninitializedAmortized(dest, a.size());
+  STLStringAppendUninitializedAmortized(dest, a.size());
   char* const begin = &(*dest)[0];
   char* out = begin + old_size;
   out = Append(out, a);
@@ -289,8 +198,7 @@
   ASSERT_NO_OVERLAP(*dest, a);
   ASSERT_NO_OVERLAP(*dest, b);
   std::string::size_type old_size = dest->size();
-  strings_internal::STLStringAppendUninitializedAmortized(dest,
-                                                          a.size() + b.size());
+  STLStringAppendUninitializedAmortized(dest, a.size() + b.size());
   char* const begin = &(*dest)[0];
   char* out = begin + old_size;
   out = Append(out, a);
@@ -304,8 +212,7 @@
   ASSERT_NO_OVERLAP(*dest, b);
   ASSERT_NO_OVERLAP(*dest, c);
   std::string::size_type old_size = dest->size();
-  strings_internal::STLStringAppendUninitializedAmortized(
-      dest, a.size() + b.size() + c.size());
+  STLStringAppendUninitializedAmortized(dest, a.size() + b.size() + c.size());
   char* const begin = &(*dest)[0];
   char* out = begin + old_size;
   out = Append(out, a);
@@ -321,7 +228,7 @@
   ASSERT_NO_OVERLAP(*dest, c);
   ASSERT_NO_OVERLAP(*dest, d);
   std::string::size_type old_size = dest->size();
-  strings_internal::STLStringAppendUninitializedAmortized(
+  STLStringAppendUninitializedAmortized(
       dest, a.size() + b.size() + c.size() + d.size());
   char* const begin = &(*dest)[0];
   char* out = begin + old_size;
diff --git a/absl/strings/str_cat.h b/absl/strings/str_cat.h
index ea2c4dc..b98adc0 100644
--- a/absl/strings/str_cat.h
+++ b/absl/strings/str_cat.h
@@ -93,6 +93,8 @@
 #include <cstddef>
 #include <cstdint>
 #include <cstring>
+#include <initializer_list>
+#include <limits>
 #include <string>
 #include <type_traits>
 #include <utility>
@@ -312,6 +314,10 @@
   // No bool ctor -- bools convert to an integral type.
   // A bool ctor would also convert incoming pointers (bletch).
 
+  // Prevent brace initialization
+  template <typename T>
+  AlphaNum(std::initializer_list<T>) = delete;  // NOLINT(runtime/explicit)
+
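
The deleted initializer_list constructor exists only to make braced arguments ill-formed: once an initializer-list constructor is declared, even a deleted one, list-initialization prefers it, so a braced argument can no longer silently fall through to one of the single-value constructors. A short illustration of the intent (whether a particular braced call compiled before this change depends on the call site):

    #include <string>
    #include "absl/strings/str_cat.h"

    void Demo() {
      std::string ok = absl::StrCat(42, "x", 2.5);  // single values still fine
      // A braced argument now selects the deleted initializer_list overload
      // and fails to compile:
      //   std::string bad = absl::StrCat({42});    // error: deleted ctor
    }
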
   AlphaNum(int x)  // NOLINT(runtime/explicit)
       : piece_(digits_, static_cast<size_t>(
                             numbers_internal::FastIntToBuffer(x, digits_) -
@@ -448,36 +454,77 @@
 void AppendPieces(absl::Nonnull<std::string*> dest,
                   std::initializer_list<absl::string_view> pieces);
 
-void STLStringAppendUninitializedAmortized(std::string* dest, size_t to_append);
+template <typename Integer>
+std::string IntegerToString(Integer i) {
+  // Any integer (signed/unsigned) up to 64 bits can be formatted into a buffer
+  // with 22 bytes (including NULL at the end).
+  constexpr size_t kMaxDigits10 = 22;
+  std::string result;
+  strings_internal::STLStringResizeUninitialized(&result, kMaxDigits10);
+  char* start = &result[0];
+  // Note: this can be optimized to not write the trailing zero.
+  char* end = numbers_internal::FastIntToBuffer(i, start);
+  auto size = static_cast<size_t>(end - start);
+  assert((size < result.size()) &&
+         "StrCat(Integer) does not fit into kMaxDigits10");
+  result.erase(size);
+  return result;
+}
+template <typename Float>
+std::string FloatToString(Float f) {
+  std::string result;
+  strings_internal::STLStringResizeUninitialized(
+      &result, numbers_internal::kSixDigitsToBufferSize);
+  char* start = &result[0];
+  result.erase(numbers_internal::SixDigitsToBuffer(f, start));
+  return result;
+}
 
 // `SingleArgStrCat` overloads take built-in `int`, `long` and `long long` types
 // (signed / unsigned) to avoid ambiguity on the call side. If we used int32_t
 // and int64_t, then at least one of the three (`int` / `long` / `long long`)
 // would have been ambiguous when passed to `SingleArgStrCat`.
-std::string SingleArgStrCat(int x);
-std::string SingleArgStrCat(unsigned int x);
-std::string SingleArgStrCat(long x);                // NOLINT
-std::string SingleArgStrCat(unsigned long x);       // NOLINT
-std::string SingleArgStrCat(long long x);           // NOLINT
-std::string SingleArgStrCat(unsigned long long x);  // NOLINT
-std::string SingleArgStrCat(float x);
-std::string SingleArgStrCat(double x);
+inline std::string SingleArgStrCat(int x) { return IntegerToString(x); }
+inline std::string SingleArgStrCat(unsigned int x) {
+  return IntegerToString(x);
+}
+// NOLINTNEXTLINE
+inline std::string SingleArgStrCat(long x) { return IntegerToString(x); }
+// NOLINTNEXTLINE
+inline std::string SingleArgStrCat(unsigned long x) {
+  return IntegerToString(x);
+}
+// NOLINTNEXTLINE
+inline std::string SingleArgStrCat(long long x) { return IntegerToString(x); }
+// NOLINTNEXTLINE
+inline std::string SingleArgStrCat(unsigned long long x) {
+  return IntegerToString(x);
+}
+inline std::string SingleArgStrCat(float x) { return FloatToString(x); }
+inline std::string SingleArgStrCat(double x) { return FloatToString(x); }
 
-// `SingleArgStrAppend` overloads are defined here for the same reasons as with
-// `SingleArgStrCat` above.
-void SingleArgStrAppend(std::string& str, int x);
-void SingleArgStrAppend(std::string& str, unsigned int x);
-void SingleArgStrAppend(std::string& str, long x);                // NOLINT
-void SingleArgStrAppend(std::string& str, unsigned long x);       // NOLINT
-void SingleArgStrAppend(std::string& str, long long x);           // NOLINT
-void SingleArgStrAppend(std::string& str, unsigned long long x);  // NOLINT
+// As of September 2023, the SingleArgStrCat() optimization is only enabled for
+// libc++. The reasons for this are:
+// 1) The SSO size for libc++ is 23, while libstdc++ and MSSTL have an SSO size
+// of 15. Since IntegerToString unconditionally resizes the string to 22 bytes,
+// this causes both libstdc++ and MSSTL to allocate.
+// 2) strings_internal::STLStringResizeUninitialized() only has an
+// implementation that avoids initialization when using libc++. This isn't as
+// relevant as (1), and the cost should be benchmarked if (1) ever changes on
+// libstdc++ or MSSTL.
+#ifdef _LIBCPP_VERSION
+#define ABSL_INTERNAL_STRCAT_ENABLE_FAST_CASE true
+#else
+#define ABSL_INTERNAL_STRCAT_ENABLE_FAST_CASE false
+#endif
 
-template <typename T,
-          typename = std::enable_if_t<std::is_arithmetic<T>::value &&
-                                      !std::is_same<T, char>::value &&
-                                      !std::is_same<T, bool>::value>>
+template <typename T, typename = std::enable_if_t<
+                          ABSL_INTERNAL_STRCAT_ENABLE_FAST_CASE &&
+                          std::is_arithmetic<T>{} && !std::is_same<T, char>{}>>
 using EnableIfFastCase = T;
 
+#undef ABSL_INTERNAL_STRCAT_ENABLE_FAST_CASE
+
 }  // namespace strings_internal
 
 ABSL_MUST_USE_RESULT inline std::string StrCat() { return std::string(); }
@@ -553,68 +600,6 @@
              static_cast<const AlphaNum&>(args).Piece()...});
 }
 
-template <class String, class T>
-std::enable_if_t<
-    std::is_integral<absl::strings_internal::EnableIfFastCase<T>>::value, void>
-StrAppend(absl::Nonnull<String*> result, T i) {
-  return absl::strings_internal::SingleArgStrAppend(*result, i);
-}
-
-// This overload is only selected if all the parameters are numbers that can be
-// handled quickly.
-// Later we can look into how we can extend this to more general argument
-// mixtures without bloating codegen too much, or copying unnecessarily.
-template <typename String, typename... T>
-std::enable_if_t<
-    (sizeof...(T) > 1),
-    std::common_type_t<std::conditional_t<
-        true, void, absl::strings_internal::EnableIfFastCase<T>>...>>
-StrAppend(absl::Nonnull<String*> str, T... args) {
-  // Do not add unnecessary variables, logic, or even "free" lambdas here.
-  // They can add overhead for the compiler and/or at run time.
-  // Furthermore, assume this function will be inlined.
-  // This function is carefully tailored to be able to be largely optimized away
-  // so that it becomes near-equivalent to the caller handling each argument
-  // individually while minimizing register pressure, so that the compiler
-  // can inline it with minimal overhead.
-
-  // First, calculate the total length, so we can perform just a single resize.
-  // Save all the lengths for later.
-  size_t total_length = 0;
-  const ptrdiff_t lengths[] = {
-      absl::numbers_internal::GetNumDigitsOrNegativeIfNegative(args)...};
-  for (const ptrdiff_t possibly_negative_length : lengths) {
-    // Lengths are negative for negative numbers. Keep them for later use, but
-    // take their absolute values for calculating total lengths;
-    total_length += possibly_negative_length < 0
-                        ? static_cast<size_t>(-possibly_negative_length)
-                        : static_cast<size_t>(possibly_negative_length);
-  }
-
-  // Now reserve space for all the arguments.
-  const size_t old_size = str->size();
-  absl::strings_internal::STLStringAppendUninitializedAmortized(str,
-                                                                total_length);
-
-  // Finally, output each argument one-by-one, from left to right.
-  size_t i = 0;  // The current argument we're processing
-  ptrdiff_t n;   // The length of the current argument
-  typename String::pointer pos = &(*str)[old_size];
-  using SomeTrivialEmptyType = std::false_type;
-  // Ugly code due to the lack of C++14 fold expression makes us.
-  const SomeTrivialEmptyType dummy1;
-  for (const SomeTrivialEmptyType& dummy2 :
-       {(/* Comma expressions are poor man's C++17 fold expression for C++14 */
-         (void)(n = lengths[i]),
-         (void)(n < 0 ? (void)(*pos++ = '-'), (n = ~n) : 0),
-         (void)absl::numbers_internal::FastIntToBufferBackward(
-             absl::numbers_internal::UnsignedAbsoluteValue(std::move(args)),
-             pos += n, static_cast<uint32_t>(n)),
-         (void)++i, dummy1)...}) {
-    (void)dummy2;  // Remove & migrate to fold expressions in C++17
-  }
-}
-
 // Helper function for the future StrCat default floating-point format, %.6g
 // This is fast.
 inline strings_internal::AlphaNumBuffer<
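
A usage sketch for the str_cat.h changes above (illustrative only, not part of
the patch). Single arithmetic arguments now dispatch to the header-inlined
IntegerToString()/FloatToString() helpers, with the fast case enabled only
under libc++ per the comment, and the deleted initializer_list constructor
rejects brace initialization of an AlphaNum that previously compiled:

  #include <string>
  #include "absl/strings/str_cat.h"

  void Example() {
    // Single-argument calls go through the inline SingleArgStrCat() overloads.
    std::string a = absl::StrCat(12345);  // "12345"
    std::string b = absl::StrCat(2.5);    // "2.5", via SixDigitsToBuffer
    // A braced single element used to resolve to AlphaNum(int); it now selects
    // the deleted initializer_list constructor and fails to compile:
    // std::string c = absl::StrCat({1});
  }
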
diff --git a/absl/strings/str_cat_test.cc b/absl/strings/str_cat_test.cc
index b30a86f..66eddf0 100644
--- a/absl/strings/str_cat_test.cc
+++ b/absl/strings/str_cat_test.cc
@@ -39,24 +39,6 @@
 
 namespace {
 
-template <typename Integer>
-void VerifyInteger(Integer value) {
-  const std::string expected = std::to_string(value);
-
-  EXPECT_EQ(absl::StrCat(value), expected);
-
-  const char* short_prefix = "x";
-  const char* long_prefix = "2;k.msabxiuow2[09i;o3k21-93-9=29]";
-
-  std::string short_str = short_prefix;
-  absl::StrAppend(&short_str, value);
-  EXPECT_EQ(short_str, short_prefix + expected);
-
-  std::string long_str = long_prefix;
-  absl::StrAppend(&long_str, value);
-  EXPECT_EQ(long_str, long_prefix + expected);
-}
-
 // Test absl::StrCat of ints and longs of various sizes and signedness.
 TEST(StrCat, Ints) {
   const short s = -1;  // NOLINT(runtime/int)
@@ -86,34 +68,6 @@
   EXPECT_EQ(answer, "-9-12");
   answer = absl::StrCat(uintptr, 0);
   EXPECT_EQ(answer, "130");
-
-  for (const uint32_t base : {2u, 10u}) {
-    for (const int extra_shift : {0, 12}) {
-      for (uint64_t i = 0; i < (1 << 8); ++i) {
-        uint64_t j = i;
-        while (true) {
-          uint64_t v = j ^ (extra_shift != 0 ? (j << extra_shift) * base : 0);
-          VerifyInteger(static_cast<bool>(v));
-          VerifyInteger(static_cast<wchar_t>(v));
-          VerifyInteger(static_cast<signed char>(v));
-          VerifyInteger(static_cast<unsigned char>(v));
-          VerifyInteger(static_cast<short>(v));               // NOLINT
-          VerifyInteger(static_cast<unsigned short>(v));      // NOLINT
-          VerifyInteger(static_cast<int>(v));                 // NOLINT
-          VerifyInteger(static_cast<unsigned int>(v));        // NOLINT
-          VerifyInteger(static_cast<long>(v));                // NOLINT
-          VerifyInteger(static_cast<unsigned long>(v));       // NOLINT
-          VerifyInteger(static_cast<long long>(v));           // NOLINT
-          VerifyInteger(static_cast<unsigned long long>(v));  // NOLINT
-          const uint64_t next = j == 0 ? 1 : j * base;
-          if (next <= j) {
-            break;
-          }
-          j = next;
-        }
-      }
-    }
-  }
 }
 
 TEST(StrCat, Enums) {
diff --git a/absl/strings/str_format.h b/absl/strings/str_format.h
index 66b6af5..76904d3 100644
--- a/absl/strings/str_format.h
+++ b/absl/strings/str_format.h
@@ -181,7 +181,7 @@
 // For a `FormatSpec` to be valid at compile-time, it must be provided as
 // either:
 //
-// * A `constexpr` literal or `absl::string_view`, which is how it most often
+// * A `constexpr` literal or `absl::string_view`, which is how it is most often
 //   used.
 // * A `ParsedFormat` instantiation, which ensures the format string is
 //   valid before use. (See below.)
diff --git a/absl/strings/str_join.h b/absl/strings/str_join.h
index 6a92c0f..8d7bc6b 100644
--- a/absl/strings/str_join.h
+++ b/absl/strings/str_join.h
@@ -247,12 +247,20 @@
   return strings_internal::JoinRange(range, separator, fmt);
 }
 
-template <typename T, typename Formatter>
+template <typename T, typename Formatter,
+          typename = typename std::enable_if<
+              !std::is_convertible<T, absl::string_view>::value>::type>
 std::string StrJoin(std::initializer_list<T> il, absl::string_view separator,
                     Formatter&& fmt) {
   return strings_internal::JoinRange(il, separator, fmt);
 }
 
+template <typename Formatter>
+inline std::string StrJoin(std::initializer_list<absl::string_view> il,
+                           absl::string_view separator, Formatter&& fmt) {
+  return strings_internal::JoinRange(il, separator, fmt);
+}
+
 template <typename... T, typename Formatter>
 std::string StrJoin(const std::tuple<T...>& value, absl::string_view separator,
                     Formatter&& fmt) {
@@ -269,16 +277,22 @@
   return strings_internal::JoinRange(range, separator);
 }
 
-template <typename T>
-std::string StrJoin(std::initializer_list<T> il,
-                    absl::string_view separator) {
+template <typename T, typename = typename std::enable_if<!std::is_convertible<
+                          T, absl::string_view>::value>::type>
+std::string StrJoin(std::initializer_list<T> il, absl::string_view separator) {
+  return strings_internal::JoinRange(il, separator);
+}
+
+inline std::string StrJoin(std::initializer_list<absl::string_view> il,
+                           absl::string_view separator) {
   return strings_internal::JoinRange(il, separator);
 }
 
 template <typename... T>
 std::string StrJoin(const std::tuple<T...>& value,
                     absl::string_view separator) {
-  return strings_internal::JoinAlgorithm(value, separator, AlphaNumFormatter());
+  return strings_internal::JoinTuple(value, separator,
+                                     std::index_sequence_for<T...>{});
 }
 
 ABSL_NAMESPACE_END
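
Illustrative usage of the new str_join.h overloads above (not part of the
patch): a braced list mixing string literals with std::string now binds to the
initializer_list<absl::string_view> overload (this is what the new
StringViewInitializerList test exercises), and tuple joining keeps the same
surface API while being implemented via JoinTuple:

  #include <string>
  #include <tuple>
  #include "absl/strings/str_join.h"

  void Example() {
    std::string b = "b";
    // Mixed element types in the braced list now resolve cleanly:
    std::string s1 = absl::StrJoin({"a", b, "c"}, "-");  // "a-b-c"
    // Tuples are joined element-wise with the default AlphaNum formatting:
    std::string s2 = absl::StrJoin(std::make_tuple(1, "x", 2.5), "/");  // "1/x/2.5"
  }
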
diff --git a/absl/strings/str_join_benchmark.cc b/absl/strings/str_join_benchmark.cc
index d6f689f..be7a725 100644
--- a/absl/strings/str_join_benchmark.cc
+++ b/absl/strings/str_join_benchmark.cc
@@ -16,6 +16,7 @@
 #include "absl/strings/str_join.h"
 
 #include <string>
+#include <tuple>
 #include <vector>
 #include <utility>
 
@@ -94,4 +95,13 @@
     ->ArgPair(16, 256)
     ->ArgPair(256, 256);
 
+void BM_JoinTuple(benchmark::State& state) {
+  for (auto _ : state) {
+    std::string s =
+        absl::StrJoin(std::make_tuple(123456789, 987654321, 24680, 13579), "/");
+    benchmark::DoNotOptimize(s);
+  }
+}
+BENCHMARK(BM_JoinTuple);
+
 }  // namespace
diff --git a/absl/strings/str_join_test.cc b/absl/strings/str_join_test.cc
index 449f95b..cd52e11 100644
--- a/absl/strings/str_join_test.cc
+++ b/absl/strings/str_join_test.cc
@@ -428,6 +428,42 @@
   }
 }
 
+TEST(StrJoin, StringViewInitializerList) {
+  {
+    // Tests initializer_list of string_views
+    std::string b = "b";
+    EXPECT_EQ("a-b-c", absl::StrJoin({"a", b, "c"}, "-"));
+  }
+  {
+    // Tests initializer_list of string_views with a non-default formatter
+    TestingParenFormatter f;
+    std::string b = "b";
+    EXPECT_EQ("(a)-(b)-(c)", absl::StrJoin({"a", b, "c"}, "-", f));
+  }
+
+  class NoCopy {
+   public:
+    explicit NoCopy(absl::string_view view) : view_(view) {}
+    NoCopy(const NoCopy&) = delete;
+    operator absl::string_view() { return view_; }  // NOLINT
+   private:
+    absl::string_view view_;
+  };
+  {
+    // Tests initializer_list of string_views preferred over initializer_list<T>
+    // for T that is implicitly convertible to string_view
+    EXPECT_EQ("a-b-c",
+              absl::StrJoin({NoCopy("a"), NoCopy("b"), NoCopy("c")}, "-"));
+  }
+  {
+    // Tests initializer_list of string_views preferred over initializer_list<T>
+    // for T that is implicitly convertible to string_view
+    TestingParenFormatter f;
+    EXPECT_EQ("(a)-(b)-(c)",
+              absl::StrJoin({NoCopy("a"), NoCopy("b"), NoCopy("c")}, "-", f));
+  }
+}
+
 TEST(StrJoin, Tuple) {
   EXPECT_EQ("", absl::StrJoin(std::make_tuple(), "-"));
   EXPECT_EQ("hello", absl::StrJoin(std::make_tuple("hello"), "-"));
diff --git a/absl/strings/str_split.h b/absl/strings/str_split.h
index 8754027..ba176fc 100644
--- a/absl/strings/str_split.h
+++ b/absl/strings/str_split.h
@@ -456,7 +456,7 @@
 //   // Stores results in a std::set<std::string>, which also performs
 //   // de-duplication and orders the elements in ascending order.
 //   std::set<std::string> a = absl::StrSplit("b,a,c,a,b", ',');
-//   // v[0] == "a", v[1] == "b", v[2] = "c"
+//   // a[0] == "a", a[1] == "b", a[2] == "c"
 //
 //   // `StrSplit()` can be used within a range-based for loop, in which case
 //   // each element will be of type `absl::string_view`.
@@ -544,7 +544,7 @@
       typename strings_internal::SelectDelimiter<Delimiter>::type;
   return strings_internal::Splitter<DelimiterType, Predicate,
                                     absl::string_view>(
-      text.value(), DelimiterType(d), std::move(p));
+      text.value(), DelimiterType(std::move(d)), std::move(p));
 }
 
 template <typename Delimiter, typename Predicate, typename StringType,
diff --git a/absl/strings/string_view.h b/absl/strings/string_view.h
index 04ca0a3..ff76001 100644
--- a/absl/strings/string_view.h
+++ b/absl/strings/string_view.h
@@ -159,7 +159,7 @@
 //
 //   absl::string_view() == absl::string_view("", 0)
 //   absl::string_view(nullptr, 0) == absl::string_view("abcdef"+6, 0)
-class string_view {
+class ABSL_INTERNAL_ATTRIBUTE_VIEW string_view {
  public:
   using traits_type = std::char_traits<char>;
   using value_type = char;
@@ -173,6 +173,7 @@
   using reverse_iterator = const_reverse_iterator;
   using size_type = size_t;
   using difference_type = std::ptrdiff_t;
+  using absl_internal_is_view = std::true_type;
 
   static constexpr size_type npos = static_cast<size_type>(-1);
 
@@ -670,7 +671,7 @@
   }
 
   static constexpr size_type StrlenInternal(absl::Nonnull<const char*> str) {
-#if defined(_MSC_VER) && _MSC_VER >= 1910 && !defined(__clang__)
+#if defined(_MSC_VER) && !defined(__clang__)
     // MSVC 2017+ can evaluate this at compile-time.
     const char* begin = str;
     while (*str != '\0') ++str;
diff --git a/absl/strings/string_view_test.cc b/absl/strings/string_view_test.cc
index 5b1eb01..e978fc3 100644
--- a/absl/strings/string_view_test.cc
+++ b/absl/strings/string_view_test.cc
@@ -32,6 +32,7 @@
 
 #include "gtest/gtest.h"
 #include "absl/base/config.h"
+#include "absl/meta/type_traits.h"
 
 #if defined(ABSL_HAVE_STD_STRING_VIEW) || defined(__ANDROID__)
 // We don't control the death messaging when using std::string_view.
@@ -46,6 +47,14 @@
 
 namespace {
 
+static_assert(!absl::type_traits_internal::IsOwner<absl::string_view>::value &&
+                  absl::type_traits_internal::IsView<absl::string_view>::value,
+              "string_view is a view, not an owner");
+
+static_assert(absl::type_traits_internal::IsLifetimeBoundAssignment<
+                  absl::string_view, std::string>::value,
+              "lifetimebound assignment not detected");
+
 // A minimal allocator that uses malloc().
 template <typename T>
 struct Mallocator {
@@ -1051,9 +1060,6 @@
     EXPECT_EQ(0u, s.size());
     EXPECT_EQ(absl::string_view(), s);
   }
-#if !defined(_MSC_VER) || _MSC_VER >= 1910
-  // MSVC 2017+ is required for good constexpr string_view support.
-  // See the implementation of `absl::string_view::StrlenInternal()`.
   {
     static constexpr char kHi[] = "hi";
     absl::string_view s = absl::NullSafeStringView(kHi);
@@ -1066,7 +1072,6 @@
     EXPECT_EQ(s.size(), 5u);
     EXPECT_EQ("hello", s);
   }
-#endif
 }
 
 TEST(StringViewTest, ConstexprCompiles) {
diff --git a/absl/strings/substitute.cc b/absl/strings/substitute.cc
index dd32c75..a71f565 100644
--- a/absl/strings/substitute.cc
+++ b/absl/strings/substitute.cc
@@ -18,6 +18,7 @@
 #include <cassert>
 #include <cstddef>
 #include <cstdint>
+#include <limits>
 #include <string>
 
 #include "absl/base/config.h"
@@ -84,6 +85,9 @@
 
   // Build the string.
   size_t original_size = output->size();
+  ABSL_INTERNAL_CHECK(
+      size <= std::numeric_limits<size_t>::max() - original_size,
+      "size_t overflow");
   strings_internal::STLStringResizeUninitializedAmortized(output,
                                                           original_size + size);
   char* target = &(*output)[original_size];
diff --git a/absl/synchronization/BUILD.bazel b/absl/synchronization/BUILD.bazel
index de06ebd..dafeba3 100644
--- a/absl/synchronization/BUILD.bazel
+++ b/absl/synchronization/BUILD.bazel
@@ -69,7 +69,9 @@
         "//absl/base:core_headers",
         "//absl/base:raw_logging_internal",
         "//absl/time",
-    ],
+    ] + select({
+        "//conditions:default": [],
+    }),
 )
 
 cc_test(
@@ -183,7 +185,7 @@
 
 cc_binary(
     name = "blocking_counter_benchmark",
-    testonly = 1,
+    testonly = True,
     srcs = ["blocking_counter_benchmark.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -230,7 +232,7 @@
 
 cc_library(
     name = "thread_pool",
-    testonly = 1,
+    testonly = True,
     hdrs = ["internal/thread_pool.h"],
     linkopts = ABSL_DEFAULT_LINKOPTS,
     visibility = [
@@ -281,7 +283,7 @@
 
 cc_library(
     name = "mutex_benchmark_common",
-    testonly = 1,
+    testonly = True,
     srcs = ["mutex_benchmark.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -300,7 +302,7 @@
 
 cc_binary(
     name = "mutex_benchmark",
-    testonly = 1,
+    testonly = True,
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
@@ -326,7 +328,7 @@
 
 cc_library(
     name = "per_thread_sem_test_common",
-    testonly = 1,
+    testonly = True,
     srcs = ["internal/per_thread_sem_test.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
diff --git a/absl/synchronization/internal/graphcycles.cc b/absl/synchronization/internal/graphcycles.cc
index 39b1848..129067c 100644
--- a/absl/synchronization/internal/graphcycles.cc
+++ b/absl/synchronization/internal/graphcycles.cc
@@ -211,7 +211,7 @@
   Vec<int32_t> table_;
   uint32_t occupied_;     // Count of non-empty slots (includes deleted slots)
 
-  static uint32_t Hash(int32_t a) { return static_cast<uint32_t>(a * 41); }
+  static uint32_t Hash(int32_t a) { return static_cast<uint32_t>(a) * 41; }
 
   // Return index for storing v.  May return an empty index or deleted index
   uint32_t FindIndex(int32_t v) const {
@@ -333,7 +333,7 @@
 
  private:
   // Number of buckets in hash table for pointer lookups.
-  static constexpr uint32_t kHashTableSize = 8171;  // should be prime
+  static constexpr uint32_t kHashTableSize = 262139;  // should be prime
 
   const Vec<Node*>* nodes_;
   std::array<int32_t, kHashTableSize> table_;
@@ -365,6 +365,14 @@
   return (n->version == NodeVersion(id)) ? n : nullptr;
 }
 
+void GraphCycles::TestOnlyAddNodes(uint32_t n) {
+  uint32_t old_size = rep_->nodes_.size();
+  rep_->nodes_.resize(n);
+  for (auto i = old_size; i < n; ++i) {
+    rep_->nodes_[i] = nullptr;
+  }
+}
+
 GraphCycles::GraphCycles() {
   InitArenaIfNecessary();
   rep_ = new (base_internal::LowLevelAlloc::AllocWithArena(sizeof(Rep), arena))
@@ -373,6 +381,7 @@
 
 GraphCycles::~GraphCycles() {
   for (auto* node : rep_->nodes_) {
+    if (node == nullptr) { continue; }
     node->Node::~Node();
     base_internal::LowLevelAlloc::Free(node);
   }
diff --git a/absl/synchronization/internal/graphcycles.h b/absl/synchronization/internal/graphcycles.h
index ceba33e..08f304b 100644
--- a/absl/synchronization/internal/graphcycles.h
+++ b/absl/synchronization/internal/graphcycles.h
@@ -126,6 +126,11 @@
   // Expensive: should only be called from graphcycles_test.cc.
   bool CheckInvariants() const;
 
+  // Test-only method to add more nodes. The nodes will not be valid, and this
+  // method should only be used to test the behavior of the graph when it is
+  // very full.
+  void TestOnlyAddNodes(uint32_t n);
+
   // ----------------------------------------------------
   struct Rep;
  private:
diff --git a/absl/synchronization/internal/graphcycles_test.cc b/absl/synchronization/internal/graphcycles_test.cc
index 3c6ef79..47410aa 100644
--- a/absl/synchronization/internal/graphcycles_test.cc
+++ b/absl/synchronization/internal/graphcycles_test.cc
@@ -14,6 +14,7 @@
 
 #include "absl/synchronization/internal/graphcycles.h"
 
+#include <climits>
 #include <map>
 #include <random>
 #include <unordered_set>
@@ -458,6 +459,24 @@
   CheckInvariants(g_);
 }
 
+TEST(GraphCycles, IntegerOverflow) {
+  GraphCycles graph_cycles;
+  char *buf = (char *)nullptr;
+  GraphId prev_id = graph_cycles.GetId(buf);
+  buf += 1;
+  GraphId id = graph_cycles.GetId(buf);
+  ASSERT_TRUE(graph_cycles.InsertEdge(prev_id, id));
+
+  // INT_MAX / 40 is enough to cause an overflow when multiplied by 41.
+  graph_cycles.TestOnlyAddNodes(INT_MAX / 40);
+
+  buf += 1;
+  GraphId newid = graph_cycles.GetId(buf);
+  graph_cycles.HasEdge(prev_id, newid);
+
+  graph_cycles.RemoveNode(buf);
+}
+
 }  // namespace synchronization_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/absl/synchronization/internal/kernel_timeout_test.cc b/absl/synchronization/internal/kernel_timeout_test.cc
index bc54671..33962f8 100644
--- a/absl/synchronization/internal/kernel_timeout_test.cc
+++ b/absl/synchronization/internal/kernel_timeout_test.cc
@@ -36,7 +36,7 @@
 extern "C" int clock_gettime(clockid_t c, struct timespec* ts) {
   if (c == CLOCK_MONOTONIC &&
       !absl::synchronization_internal::KernelTimeout::SupportsSteadyClock()) {
-    absl::SharedBitGen gen;
+    thread_local absl::BitGen gen;  // NOLINT
     ts->tv_sec = absl::Uniform(gen, 0, 1'000'000'000);
     ts->tv_nsec = absl::Uniform(gen, 0, 1'000'000'000);
     return 0;
@@ -58,7 +58,8 @@
 
 using absl::synchronization_internal::KernelTimeout;
 
-TEST(KernelTimeout, FiniteTimes) {
+// TODO(b/348224897): re-enable when the flakiness is fixed.
+TEST(KernelTimeout, DISABLED_FiniteTimes) {
   constexpr absl::Duration kDurationsToTest[] = {
     absl::ZeroDuration(),
     absl::Nanoseconds(1),
@@ -228,7 +229,8 @@
   EXPECT_EQ(t.ToChronoDuration(), std::chrono::nanoseconds(0));
 }
 
-TEST(KernelTimeout, FiniteDurations) {
+// TODO(b/348224897): re-enable when the flakiness is fixed.
+TEST(KernelTimeout, DISABLED_FiniteDurations) {
   constexpr absl::Duration kDurationsToTest[] = {
     absl::ZeroDuration(),
     absl::Nanoseconds(1),
@@ -274,7 +276,8 @@
   }
 }
 
-TEST(KernelTimeout, NegativeDurations) {
+// TODO(b/348224897): re-enable when the flakiness is fixed.
+TEST(KernelTimeout, DISABLED_NegativeDurations) {
   constexpr absl::Duration kDurationsToTest[] = {
     -absl::ZeroDuration(),
     -absl::Nanoseconds(1),
diff --git a/absl/synchronization/internal/per_thread_sem_test.cc b/absl/synchronization/internal/per_thread_sem_test.cc
index 24a6b54..e3cf41d 100644
--- a/absl/synchronization/internal/per_thread_sem_test.cc
+++ b/absl/synchronization/internal/per_thread_sem_test.cc
@@ -159,7 +159,11 @@
   const absl::Duration elapsed = absl::Now() - start;
   // Allow for a slight early return, to account for quality of implementation
   // issues on various platforms.
-  const absl::Duration slop = absl::Milliseconds(1);
+  absl::Duration slop = absl::Milliseconds(1);
+#ifdef _MSC_VER
+  // Use higher slop on MSVC due to flaky test failures.
+  slop = absl::Milliseconds(16);
+#endif
   EXPECT_LE(delay - slop, elapsed)
       << "Wait returned " << delay - elapsed
       << " early (with " << slop << " slop), start time was " << start;
diff --git a/absl/synchronization/internal/waiter_test.cc b/absl/synchronization/internal/waiter_test.cc
index 992db29..da13896 100644
--- a/absl/synchronization/internal/waiter_test.cc
+++ b/absl/synchronization/internal/waiter_test.cc
@@ -44,7 +44,7 @@
 extern "C" int clock_gettime(clockid_t c, struct timespec* ts) {
   if (c == CLOCK_MONOTONIC &&
       !absl::synchronization_internal::KernelTimeout::SupportsSteadyClock()) {
-    absl::SharedBitGen gen;
+    thread_local absl::BitGen gen;  // NOLINT
     ts->tv_sec = absl::Uniform(gen, 0, 1'000'000'000);
     ts->tv_nsec = absl::Uniform(gen, 0, 1'000'000'000);
     return 0;
diff --git a/absl/synchronization/lifetime_test.cc b/absl/synchronization/lifetime_test.cc
index d5ce35a..4c4cff6 100644
--- a/absl/synchronization/lifetime_test.cc
+++ b/absl/synchronization/lifetime_test.cc
@@ -123,10 +123,9 @@
 };
 
 // These tests require that the compiler correctly supports C++11 constant
-// initialization... but MSVC has a known regression since v19.10 till v19.25:
+// initialization... but MSVC has a known regression (since v19.10) till v19.25:
 // https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html
-#if defined(__clang__) || \
-    !(defined(_MSC_VER) && _MSC_VER > 1900 && _MSC_VER < 1925)
+#if defined(__clang__) || !(defined(_MSC_VER) && _MSC_VER < 1925)
 // kConstInit
 // Test early usage.  (Declaration comes first; definitions must appear after
 // the test runner.)
diff --git a/absl/synchronization/mutex.h b/absl/synchronization/mutex.h
index d53a22b..be3f1f5 100644
--- a/absl/synchronization/mutex.h
+++ b/absl/synchronization/mutex.h
@@ -148,7 +148,7 @@
 //
 // See also `MutexLock`, below, for scoped `Mutex` acquisition.
 
-class ABSL_LOCKABLE Mutex {
+class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED Mutex {
  public:
   // Creates a `Mutex` that is not held by anyone. This constructor is
   // typically used for Mutexes allocated on the heap or the stack.
@@ -190,7 +190,7 @@
   // If the mutex can be acquired without blocking, does so exclusively and
   // returns `true`. Otherwise, returns `false`. Returns `true` with high
   // probability if the `Mutex` was free.
-  bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true);
+  ABSL_MUST_USE_RESULT bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true);
 
   // Mutex::AssertHeld()
   //
@@ -255,7 +255,7 @@
   // If the mutex can be acquired without blocking, acquires this mutex for
   // shared access and returns `true`. Otherwise, returns `false`. Returns
   // `true` with high probability if the `Mutex` was free or shared.
-  bool ReaderTryLock() ABSL_SHARED_TRYLOCK_FUNCTION(true);
+  ABSL_MUST_USE_RESULT bool ReaderTryLock() ABSL_SHARED_TRYLOCK_FUNCTION(true);
 
   // Mutex::AssertReaderHeld()
   //
@@ -281,7 +281,8 @@
 
   void WriterUnlock() ABSL_UNLOCK_FUNCTION() { this->Unlock(); }
 
-  bool WriterTryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
+  ABSL_MUST_USE_RESULT bool WriterTryLock()
+      ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
     return this->TryLock();
   }
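
Illustrative effect of the ABSL_MUST_USE_RESULT annotations above (not part of
the patch): on compilers that honor the attribute, silently discarding the
result of TryLock()/ReaderTryLock()/WriterTryLock() now produces a warning,
while the intended check-then-unlock pattern is unchanged:

  #include "absl/synchronization/mutex.h"

  void Example(absl::Mutex& mu) {
    // mu.TryLock();       // would now warn: unused ABSL_MUST_USE_RESULT value
    if (mu.TryLock()) {    // intended usage: act only if the lock was acquired
      mu.Unlock();
    }
  }
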
 
diff --git a/absl/time/BUILD.bazel b/absl/time/BUILD.bazel
index e3fe705..05f1f2f 100644
--- a/absl/time/BUILD.bazel
+++ b/absl/time/BUILD.bazel
@@ -65,7 +65,7 @@
 
 cc_library(
     name = "test_util",
-    testonly = 1,
+    testonly = True,
     srcs = ["internal/test_util.cc"],
     hdrs = ["internal/test_util.h"],
     copts = ABSL_DEFAULT_COPTS,
diff --git a/absl/time/CMakeLists.txt b/absl/time/CMakeLists.txt
index e1ade7a..fe625f2 100644
--- a/absl/time/CMakeLists.txt
+++ b/absl/time/CMakeLists.txt
@@ -83,7 +83,7 @@
     Threads::Threads
     # TODO(#1495): Use $<LINK_LIBRARY:FRAMEWORK,CoreFoundation> once our
     # minimum CMake version >= 3.24
-    $<$<PLATFORM_ID:Darwin>:-Wl,-framework,CoreFoundation>
+    $<$<PLATFORM_ID:Darwin,iOS,tvOS,visionOS,watchOS>:-Wl,-framework,CoreFoundation>
 )
 
 # Internal-only target, do not depend on directly.
diff --git a/absl/time/civil_time.h b/absl/time/civil_time.h
index 3e904a1..d198eba 100644
--- a/absl/time/civil_time.h
+++ b/absl/time/civil_time.h
@@ -55,7 +55,7 @@
 // Example:
 //
 //   // Construct a civil-time object for a specific day
-//   const absl::CivilDay cd(1969, 07, 20);
+//   const absl::CivilDay cd(1969, 7, 20);
 //
 //   // Construct a civil-time object for a specific second
 //   const absl::CivilSecond cd(2018, 8, 1, 12, 0, 1);
@@ -65,7 +65,7 @@
 // Example:
 //
 //   // Valid in C++14
-//   constexpr absl::CivilDay cd(1969, 07, 20);
+//   constexpr absl::CivilDay cd(1969, 7, 20);
 
 #ifndef ABSL_TIME_CIVIL_TIME_H_
 #define ABSL_TIME_CIVIL_TIME_H_
diff --git a/absl/time/clock.cc b/absl/time/clock.cc
index aa74367..ecd539e 100644
--- a/absl/time/clock.cc
+++ b/absl/time/clock.cc
@@ -88,11 +88,25 @@
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace time_internal {
+
+// On some processors, consecutive reads of the cycle counter may yield the
+// same value (weakly-increasing). In x86-64 debug builds we therefore clear
+// the least significant bits of each reading to discourage code from
+// depending on a strictly-increasing Now() value; readings taken close
+// together will then compare equal.
+#if !defined(NDEBUG) && defined(__x86_64__)
+constexpr int64_t kCycleClockNowMask = ~int64_t{0xff};
+#else
+constexpr int64_t kCycleClockNowMask = ~int64_t{0};
+#endif
+
 // This is a friend wrapper around UnscaledCycleClock::Now()
 // (needed to access UnscaledCycleClock).
 class UnscaledCycleClockWrapperForGetCurrentTime {
  public:
-  static int64_t Now() { return base_internal::UnscaledCycleClock::Now(); }
+  static int64_t Now() {
+    return base_internal::UnscaledCycleClock::Now() & kCycleClockNowMask;
+  }
 };
 }  // namespace time_internal
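
For context on kCycleClockNowMask above (editorial sketch, assuming the
!NDEBUG && __x86_64__ branch): clearing the low 8 bits of the raw cycle count
makes readings taken within roughly 256 cycles of each other compare equal, so
debug builds are more likely to flush out code that wrongly assumes the
underlying clock reads are strictly increasing:

  #include <cstdint>

  constexpr int64_t kMask = ~int64_t{0xff};  // the debug-mode x86-64 value
  // Two nearby raw readings collapse to the same masked value.
  static_assert((int64_t{0x123456} & kMask) == (int64_t{0x1234AB} & kMask),
                "nearby cycle counts compare equal after masking");
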
 
diff --git a/absl/time/duration.cc b/absl/time/duration.cc
index bdb16e2..8d0b66f 100644
--- a/absl/time/duration.cc
+++ b/absl/time/duration.cc
@@ -219,7 +219,7 @@
                  ? static_cast<uint128>(Uint128Low64(a) * Uint128Low64(b))
                  : a * b;
     }
-    return b == 0 ? b : (a > kuint128max / b) ? kuint128max : a * b;
+    return b == 0 ? b : (a > Uint128Max() / b) ? Uint128Max() : a * b;
   }
 };
 
@@ -280,33 +280,35 @@
   int64_t den_hi = time_internal::GetRepHi(den);
   uint32_t den_lo = time_internal::GetRepLo(den);
 
-  if (den_hi == 0 && den_lo == kTicksPerNanosecond) {
-    // Dividing by 1ns
-    if (num_hi >= 0 && num_hi < (kint64max - kTicksPerSecond) / 1000000000) {
-      *q = num_hi * 1000000000 + num_lo / kTicksPerNanosecond;
-      *rem = time_internal::MakeDuration(0, num_lo % den_lo);
-      return true;
-    }
-  } else if (den_hi == 0 && den_lo == 100 * kTicksPerNanosecond) {
-    // Dividing by 100ns (common when converting to Universal time)
-    if (num_hi >= 0 && num_hi < (kint64max - kTicksPerSecond) / 10000000) {
-      *q = num_hi * 10000000 + num_lo / (100 * kTicksPerNanosecond);
-      *rem = time_internal::MakeDuration(0, num_lo % den_lo);
-      return true;
-    }
-  } else if (den_hi == 0 && den_lo == 1000 * kTicksPerNanosecond) {
-    // Dividing by 1us
-    if (num_hi >= 0 && num_hi < (kint64max - kTicksPerSecond) / 1000000) {
-      *q = num_hi * 1000000 + num_lo / (1000 * kTicksPerNanosecond);
-      *rem = time_internal::MakeDuration(0, num_lo % den_lo);
-      return true;
-    }
-  } else if (den_hi == 0 && den_lo == 1000000 * kTicksPerNanosecond) {
-    // Dividing by 1ms
-    if (num_hi >= 0 && num_hi < (kint64max - kTicksPerSecond) / 1000) {
-      *q = num_hi * 1000 + num_lo / (1000000 * kTicksPerNanosecond);
-      *rem = time_internal::MakeDuration(0, num_lo % den_lo);
-      return true;
+  if (den_hi == 0) {
+    if (den_lo == kTicksPerNanosecond) {
+      // Dividing by 1ns
+      if (num_hi >= 0 && num_hi < (kint64max - kTicksPerSecond) / 1000000000) {
+        *q = num_hi * 1000000000 + num_lo / kTicksPerNanosecond;
+        *rem = time_internal::MakeDuration(0, num_lo % den_lo);
+        return true;
+      }
+    } else if (den_lo == 100 * kTicksPerNanosecond) {
+      // Dividing by 100ns (common when converting to Universal time)
+      if (num_hi >= 0 && num_hi < (kint64max - kTicksPerSecond) / 10000000) {
+        *q = num_hi * 10000000 + num_lo / (100 * kTicksPerNanosecond);
+        *rem = time_internal::MakeDuration(0, num_lo % den_lo);
+        return true;
+      }
+    } else if (den_lo == 1000 * kTicksPerNanosecond) {
+      // Dividing by 1us
+      if (num_hi >= 0 && num_hi < (kint64max - kTicksPerSecond) / 1000000) {
+        *q = num_hi * 1000000 + num_lo / (1000 * kTicksPerNanosecond);
+        *rem = time_internal::MakeDuration(0, num_lo % den_lo);
+        return true;
+      }
+    } else if (den_lo == 1000000 * kTicksPerNanosecond) {
+      // Dividing by 1ms
+      if (num_hi >= 0 && num_hi < (kint64max - kTicksPerSecond) / 1000) {
+        *q = num_hi * 1000 + num_lo / (1000000 * kTicksPerNanosecond);
+        *rem = time_internal::MakeDuration(0, num_lo % den_lo);
+        return true;
+      }
     }
   } else if (den_hi > 0 && den_lo == 0) {
     // Dividing by positive multiple of 1s
@@ -342,19 +344,10 @@
 
 }  // namespace
 
-namespace time_internal {
+namespace {
 
-// The 'satq' argument indicates whether the quotient should saturate at the
-// bounds of int64_t.  If it does saturate, the difference will spill over to
-// the remainder.  If it does not saturate, the remainder remain accurate,
-// but the returned quotient will over/underflow int64_t and should not be used.
-int64_t IDivDuration(bool satq, const Duration num, const Duration den,
+int64_t IDivSlowPath(bool satq, const Duration num, const Duration den,
                      Duration* rem) {
-  int64_t q = 0;
-  if (IDivFastPath(num, den, &q, rem)) {
-    return q;
-  }
-
   const bool num_neg = num < ZeroDuration();
   const bool den_neg = den < ZeroDuration();
   const bool quotient_neg = num_neg != den_neg;
@@ -391,7 +384,27 @@
   return -static_cast<int64_t>(Uint128Low64(quotient128 - 1) & kint64max) - 1;
 }
 
-}  // namespace time_internal
+// The 'satq' argument indicates whether the quotient should saturate at the
+// bounds of int64_t.  If it does saturate, the difference will spill over to
+// the remainder.  If it does not saturate, the remainder remains accurate,
+// but the returned quotient will over/underflow int64_t and should not be used.
+ABSL_ATTRIBUTE_ALWAYS_INLINE inline int64_t IDivDurationImpl(bool satq,
+                                                             const Duration num,
+                                                             const Duration den,
+                                                             Duration* rem) {
+  int64_t q = 0;
+  if (IDivFastPath(num, den, &q, rem)) {
+    return q;
+  }
+  return IDivSlowPath(satq, num, den, rem);
+}
+
+}  // namespace
+
+int64_t IDivDuration(Duration num, Duration den, Duration* rem) {
+  return IDivDurationImpl(true, num, den,
+                          rem);  // trunc towards zero
+}
 
 //
 // Additive operators.
@@ -475,7 +488,7 @@
 }
 
 Duration& Duration::operator%=(Duration rhs) {
-  time_internal::IDivDuration(false, *this, rhs, this);
+  IDivDurationImpl(false, *this, rhs, this);
   return *this;
 }
 
@@ -501,9 +514,7 @@
 // Trunc/Floor/Ceil.
 //
 
-Duration Trunc(Duration d, Duration unit) {
-  return d - (d % unit);
-}
+Duration Trunc(Duration d, Duration unit) { return d - (d % unit); }
 
 Duration Floor(const Duration d, const Duration unit) {
   const absl::Duration td = Trunc(d, unit);
@@ -591,15 +602,9 @@
 double ToDoubleMilliseconds(Duration d) {
   return FDivDuration(d, Milliseconds(1));
 }
-double ToDoubleSeconds(Duration d) {
-  return FDivDuration(d, Seconds(1));
-}
-double ToDoubleMinutes(Duration d) {
-  return FDivDuration(d, Minutes(1));
-}
-double ToDoubleHours(Duration d) {
-  return FDivDuration(d, Hours(1));
-}
+double ToDoubleSeconds(Duration d) { return FDivDuration(d, Seconds(1)); }
+double ToDoubleMinutes(Duration d) { return FDivDuration(d, Minutes(1)); }
+double ToDoubleHours(Duration d) { return FDivDuration(d, Hours(1)); }
 
 timespec ToTimespec(Duration d) {
   timespec ts;
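
A usage sketch of the refactored division entry points above (illustrative
only; the semantics match the existing IDivDuration() documentation rather
than anything new in this patch):

  #include <cstdint>
  #include "absl/time/time.h"

  void Example() {
    absl::Duration rem;
    // Quotient truncates toward zero; the remainder carries what is left over.
    int64_t q = absl::IDivDuration(absl::Seconds(7), absl::Seconds(2), &rem);
    // q == 3, rem == absl::Seconds(1)

    // operator/ and operator% are defined in terms of the same division; the
    // 1ms denominator below takes the IDivFastPath branch.
    int64_t q2 = absl::Seconds(7) / absl::Seconds(2);             // 3
    absl::Duration m = absl::Seconds(7) % absl::Milliseconds(1);  // zero
    (void)q; (void)q2; (void)m;
  }
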
diff --git a/absl/time/duration_benchmark.cc b/absl/time/duration_benchmark.cc
index 56820f3..fdb26bb 100644
--- a/absl/time/duration_benchmark.cc
+++ b/absl/time/duration_benchmark.cc
@@ -290,6 +290,26 @@
 }
 BENCHMARK(BM_Duration_IDivDuration_Hours);
 
+void BM_Duration_Modulo(benchmark::State& state) {
+  int i = 0;
+  while (state.KeepRunning()) {
+    auto mod = absl::Seconds(i) % absl::Nanoseconds(12345);
+    benchmark::DoNotOptimize(mod);
+    ++i;
+  }
+}
+BENCHMARK(BM_Duration_Modulo);
+
+void BM_Duration_Modulo_FastPath(benchmark::State& state) {
+  int i = 0;
+  while (state.KeepRunning()) {
+    auto mod = absl::Seconds(i) % absl::Milliseconds(1);
+    benchmark::DoNotOptimize(mod);
+    ++i;
+  }
+}
+BENCHMARK(BM_Duration_Modulo_FastPath);
+
 void BM_Duration_ToInt64Nanoseconds(benchmark::State& state) {
   absl::Duration d = absl::Seconds(100000);
   while (state.KeepRunning()) {
diff --git a/absl/time/duration_test.cc b/absl/time/duration_test.cc
index dcf7aad..4c801a8 100644
--- a/absl/time/duration_test.cc
+++ b/absl/time/duration_test.cc
@@ -19,6 +19,11 @@
 #include <array>
 #include <cfloat>
 #include <chrono>  // NOLINT(build/c++11)
+
+#ifdef __cpp_impl_three_way_comparison
+#include <compare>
+#endif  // __cpp_impl_three_way_comparison
+
 #include <cmath>
 #include <cstdint>
 #include <ctime>
@@ -431,6 +436,15 @@
   EXPECT_LT(-inf, any_dur);
   EXPECT_LT(-inf, inf);
   EXPECT_GT(inf, -inf);
+
+#ifdef __cpp_impl_three_way_comparison
+  EXPECT_EQ(inf <=> inf, std::strong_ordering::equal);
+  EXPECT_EQ(-inf <=> -inf, std::strong_ordering::equal);
+  EXPECT_EQ(-inf <=> inf, std::strong_ordering::less);
+  EXPECT_EQ(inf <=> -inf, std::strong_ordering::greater);
+  EXPECT_EQ(any_dur <=> inf, std::strong_ordering::less);
+  EXPECT_EQ(any_dur <=> -inf, std::strong_ordering::greater);
+#endif  // __cpp_impl_three_way_comparison
 }
 
 TEST(Duration, InfinityAddition) {
@@ -496,9 +510,20 @@
   // Interesting case
   absl::Duration almost_neg_inf = sec_min;
   EXPECT_LT(-inf, almost_neg_inf);
+
+#ifdef __cpp_impl_three_way_comparison
+  EXPECT_EQ(-inf <=> almost_neg_inf, std::strong_ordering::less);
+  EXPECT_EQ(almost_neg_inf <=> -inf, std::strong_ordering::greater);
+#endif  // __cpp_impl_three_way_comparison
+
   almost_neg_inf -= -absl::Nanoseconds(1);
   EXPECT_LT(-inf, almost_neg_inf);
 
+#ifdef __cpp_impl_three_way_comparison
+  EXPECT_EQ(-inf <=> almost_neg_inf, std::strong_ordering::less);
+  EXPECT_EQ(almost_neg_inf <=> -inf, std::strong_ordering::greater);
+#endif  // __cpp_impl_three_way_comparison
+
   // For reference: IEEE 754 behavior
   const double dbl_inf = std::numeric_limits<double>::infinity();
   EXPECT_TRUE(std::isnan(dbl_inf - dbl_inf));  // We return inf
@@ -857,6 +882,21 @@
 
   EXPECT_LT(neg_full_range, full_range);
   EXPECT_EQ(neg_full_range, -full_range);
+
+#ifdef __cpp_impl_three_way_comparison
+  EXPECT_EQ(range_future <=> absl::InfiniteDuration(),
+            std::strong_ordering::less);
+  EXPECT_EQ(range_past <=> -absl::InfiniteDuration(),
+            std::strong_ordering::greater);
+  EXPECT_EQ(full_range <=> absl::ZeroDuration(),  //
+            std::strong_ordering::greater);
+  EXPECT_EQ(full_range <=> -absl::InfiniteDuration(),
+            std::strong_ordering::greater);
+  EXPECT_EQ(neg_full_range <=> -absl::InfiniteDuration(),
+            std::strong_ordering::greater);
+  EXPECT_EQ(neg_full_range <=> full_range, std::strong_ordering::less);
+  EXPECT_EQ(neg_full_range <=> -full_range, std::strong_ordering::equal);
+#endif  // __cpp_impl_three_way_comparison
 }
 
 TEST(Duration, RelationalOperators) {
@@ -880,6 +920,27 @@
 #undef TEST_REL_OPS
 }
 
+
+#ifdef __cpp_impl_three_way_comparison
+
+TEST(Duration, SpaceshipOperators) {
+#define TEST_REL_OPS(UNIT)               \
+  static_assert(UNIT(2) <=> UNIT(2) == std::strong_ordering::equal, ""); \
+  static_assert(UNIT(1) <=> UNIT(2) == std::strong_ordering::less, ""); \
+  static_assert(UNIT(3) <=> UNIT(2) == std::strong_ordering::greater, "");
+
+  TEST_REL_OPS(absl::Nanoseconds);
+  TEST_REL_OPS(absl::Microseconds);
+  TEST_REL_OPS(absl::Milliseconds);
+  TEST_REL_OPS(absl::Seconds);
+  TEST_REL_OPS(absl::Minutes);
+  TEST_REL_OPS(absl::Hours);
+
+#undef TEST_REL_OPS
+}
+
+#endif  // __cpp_impl_three_way_comparison
+
 TEST(Duration, Addition) {
 #define TEST_ADD_OPS(UNIT)                  \
   do {                                      \
diff --git a/absl/time/format.cc b/absl/time/format.cc
index 15a26b1..bd06f8f 100644
--- a/absl/time/format.cc
+++ b/absl/time/format.cc
@@ -16,6 +16,7 @@
 
 #include <cctype>
 #include <cstdint>
+#include <utility>
 
 #include "absl/strings/match.h"
 #include "absl/strings/string_view.h"
@@ -136,7 +137,7 @@
   if (b) {
     *time = Join(parts);
   } else if (err != nullptr) {
-    *err = error;
+    *err = std::move(error);
   }
   return b;
 }
diff --git a/absl/time/internal/cctz/src/time_zone_libc.cc b/absl/time/internal/cctz/src/time_zone_libc.cc
index d014612..b509402 100644
--- a/absl/time/internal/cctz/src/time_zone_libc.cc
+++ b/absl/time/internal/cctz/src/time_zone_libc.cc
@@ -12,7 +12,7 @@
 //   See the License for the specific language governing permissions and
 //   limitations under the License.
 
-#if defined(_WIN32) || defined(_WIN64)
+#if !defined(_CRT_SECURE_NO_WARNINGS) && defined(_WIN32)
 #define _CRT_SECURE_NO_WARNINGS 1
 #endif
 
diff --git a/absl/time/internal/cctz/src/time_zone_lookup.cc b/absl/time/internal/cctz/src/time_zone_lookup.cc
index d22691b..8979174 100644
--- a/absl/time/internal/cctz/src/time_zone_lookup.cc
+++ b/absl/time/internal/cctz/src/time_zone_lookup.cc
@@ -17,9 +17,6 @@
 
 #if defined(__ANDROID__)
 #include <sys/system_properties.h>
-#if defined(__ANDROID_API__) && __ANDROID_API__ >= 21
-#include <dlfcn.h>
-#endif
 #endif
 
 #if defined(__APPLE__)
@@ -66,32 +63,6 @@
 namespace cctz {
 
 namespace {
-#if defined(__ANDROID__) && defined(__ANDROID_API__) && __ANDROID_API__ >= 21
-// Android 'L' removes __system_property_get() from the NDK, however
-// it is still a hidden symbol in libc so we use dlsym() to access it.
-// See Chromium's base/sys_info_android.cc for a similar example.
-
-using property_get_func = int (*)(const char*, char*);
-
-property_get_func LoadSystemPropertyGet() {
-  int flag = RTLD_LAZY | RTLD_GLOBAL;
-#if defined(RTLD_NOLOAD)
-  flag |= RTLD_NOLOAD;  // libc.so should already be resident
-#endif
-  if (void* handle = dlopen("libc.so", flag)) {
-    void* sym = dlsym(handle, "__system_property_get");
-    dlclose(handle);
-    return reinterpret_cast<property_get_func>(sym);
-  }
-  return nullptr;
-}
-
-int __system_property_get(const char* name, char* value) {
-  static property_get_func system_property_get = LoadSystemPropertyGet();
-  return system_property_get ? system_property_get(name, value) : -1;
-}
-#endif
-
 #if defined(USE_WIN32_LOCAL_TIME_ZONE)
 // Calls the WinRT Calendar.GetTimeZone method to obtain the IANA ID of the
 // local time zone. Returns an empty vector in case of an error.
diff --git a/absl/time/internal/cctz/src/tzfile.h b/absl/time/internal/cctz/src/tzfile.h
index 114026d..2be3bb8 100644
--- a/absl/time/internal/cctz/src/tzfile.h
+++ b/absl/time/internal/cctz/src/tzfile.h
@@ -77,11 +77,11 @@
 ** time uses 8 rather than 4 chars,
 ** then a POSIX-TZ-environment-variable-style string for use in handling
 ** instants after the last transition time stored in the file
-** (with nothing between the newlines if there is no POSIX representation for
-** such instants).
+** (with nothing between the newlines if there is no POSIX.1-2017
+** representation for such instants).
 **
 ** If tz_version is '3' or greater, the above is extended as follows.
-** First, the POSIX TZ string's hour offset may range from -167
+** First, the TZ string's hour offset may range from -167
 ** through 167 as compared to the POSIX-required 0 through 24.
 ** Second, its DST start time may be January 1 at 00:00 and its stop
 ** time December 31 at 24:00 plus the difference between DST and
diff --git a/absl/time/internal/cctz/testdata/version b/absl/time/internal/cctz/testdata/version
index cd9c3f6..04fe674 100644
--- a/absl/time/internal/cctz/testdata/version
+++ b/absl/time/internal/cctz/testdata/version
@@ -1 +1 @@
-2023d
+2024a
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/America/Miquelon b/absl/time/internal/cctz/testdata/zoneinfo/America/Miquelon
index 3b62585..ba95cb0 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/America/Miquelon
+++ b/absl/time/internal/cctz/testdata/zoneinfo/America/Miquelon
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/America/Montreal b/absl/time/internal/cctz/testdata/zoneinfo/America/Montreal
index fe6be8e..668e70d 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/America/Montreal
+++ b/absl/time/internal/cctz/testdata/zoneinfo/America/Montreal
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/America/Nipigon b/absl/time/internal/cctz/testdata/zoneinfo/America/Nipigon
index fe6be8e..668e70d 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/America/Nipigon
+++ b/absl/time/internal/cctz/testdata/zoneinfo/America/Nipigon
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/America/Thunder_Bay b/absl/time/internal/cctz/testdata/zoneinfo/America/Thunder_Bay
index fe6be8e..668e70d 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/America/Thunder_Bay
+++ b/absl/time/internal/cctz/testdata/zoneinfo/America/Thunder_Bay
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/America/Toronto b/absl/time/internal/cctz/testdata/zoneinfo/America/Toronto
index fe6be8e..668e70d 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/America/Toronto
+++ b/absl/time/internal/cctz/testdata/zoneinfo/America/Toronto
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/Asia/Almaty b/absl/time/internal/cctz/testdata/zoneinfo/Asia/Almaty
index 3ec4fc8..02f047d 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/Asia/Almaty
+++ b/absl/time/internal/cctz/testdata/zoneinfo/Asia/Almaty
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/Asia/Gaza b/absl/time/internal/cctz/testdata/zoneinfo/Asia/Gaza
index 6241b4e..0d79662 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/Asia/Gaza
+++ b/absl/time/internal/cctz/testdata/zoneinfo/Asia/Gaza
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hebron b/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hebron
index 5267de9..53a3c14 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hebron
+++ b/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hebron
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ho_Chi_Minh b/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ho_Chi_Minh
index de53596..86e21b0 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ho_Chi_Minh
+++ b/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ho_Chi_Minh
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/Asia/Qostanay b/absl/time/internal/cctz/testdata/zoneinfo/Asia/Qostanay
index ff6fe61..109fe41 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/Asia/Qostanay
+++ b/absl/time/internal/cctz/testdata/zoneinfo/Asia/Qostanay
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/Asia/Saigon b/absl/time/internal/cctz/testdata/zoneinfo/Asia/Saigon
index de53596..86e21b0 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/Asia/Saigon
+++ b/absl/time/internal/cctz/testdata/zoneinfo/Asia/Saigon
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/Canada/Eastern b/absl/time/internal/cctz/testdata/zoneinfo/Canada/Eastern
index fe6be8e..668e70d 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/Canada/Eastern
+++ b/absl/time/internal/cctz/testdata/zoneinfo/Canada/Eastern
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/zonenow.tab b/absl/time/internal/cctz/testdata/zoneinfo/zonenow.tab
index 2dbe8f0..b6f2910 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/zonenow.tab
+++ b/absl/time/internal/cctz/testdata/zoneinfo/zonenow.tab
@@ -199,7 +199,7 @@
 XX	+3431+06912	Asia/Kabul	Afghanistan
 #
 # +05
-XX	+4120+06918	Asia/Tashkent	Russia; Tajikistan; Turkmenistan; Uzbekistan; Maldives
+XX	+4120+06918	Asia/Tashkent	Russia; west Kazakhstan; Tajikistan; Turkmenistan; Uzbekistan; Maldives
 #
 # +05 - PKT
 XX	+2452+06703	Asia/Karachi	Pakistan ("PKT")
@@ -215,6 +215,8 @@
 #
 # +06
 XX	+2343+09025	Asia/Dhaka	Russia; Kyrgyzstan; Bhutan; Bangladesh; Chagos
+# +06 until 2024-03-01; then +05
+XX	+4315+07657	Asia/Almaty	Kazakhstan (except western areas)
 #
 # +06:30
 XX	+1647+09610	Asia/Yangon	Myanmar; Cocos
diff --git a/absl/time/time.h b/absl/time/time.h
index 3758080..f133c2d 100644
--- a/absl/time/time.h
+++ b/absl/time/time.h
@@ -75,15 +75,22 @@
 struct timeval;
 #endif
 #include <chrono>  // NOLINT(build/c++11)
+
+#ifdef __cpp_impl_three_way_comparison
+#include <compare>
+#endif  // __cpp_impl_three_way_comparison
+
 #include <cmath>
 #include <cstdint>
 #include <ctime>
 #include <limits>
 #include <ostream>
+#include <ratio>  // NOLINT(build/c++11)
 #include <string>
 #include <type_traits>
 #include <utility>
 
+#include "absl/base/attributes.h"
 #include "absl/base/config.h"
 #include "absl/base/macros.h"
 #include "absl/strings/string_view.h"
@@ -98,7 +105,6 @@
 class TimeZone;  // Defined below
 
 namespace time_internal {
-int64_t IDivDuration(bool satq, Duration num, Duration den, Duration* rem);
 ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromUnixDuration(Duration d);
 ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration ToUnixDuration(Time t);
 ABSL_ATTRIBUTE_CONST_FUNCTION constexpr int64_t GetRepHi(Duration d);
@@ -306,6 +312,14 @@
 };
 
 // Relational Operators
+
+#ifdef __cpp_impl_three_way_comparison
+
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr std::strong_ordering operator<=>(
+    Duration lhs, Duration rhs);
+
+#endif  // __cpp_impl_three_way_comparison
+
 ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator<(Duration lhs,
                                                        Duration rhs);
 ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator>(Duration lhs,
@@ -338,30 +352,6 @@
   return lhs -= rhs;
 }
 
-// Multiplicative Operators
-// Integer operands must be representable as int64_t.
-template <typename T>
-ABSL_ATTRIBUTE_CONST_FUNCTION Duration operator*(Duration lhs, T rhs) {
-  return lhs *= rhs;
-}
-template <typename T>
-ABSL_ATTRIBUTE_CONST_FUNCTION Duration operator*(T lhs, Duration rhs) {
-  return rhs *= lhs;
-}
-template <typename T>
-ABSL_ATTRIBUTE_CONST_FUNCTION Duration operator/(Duration lhs, T rhs) {
-  return lhs /= rhs;
-}
-ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t operator/(Duration lhs,
-                                                       Duration rhs) {
-  return time_internal::IDivDuration(true, lhs, rhs,
-                                     &lhs);  // trunc towards zero
-}
-ABSL_ATTRIBUTE_CONST_FUNCTION inline Duration operator%(Duration lhs,
-                                                        Duration rhs) {
-  return lhs %= rhs;
-}
-
 // IDivDuration()
 //
 // Divides a numerator `Duration` by a denominator `Duration`, returning the
@@ -390,10 +380,7 @@
 //   // Here, q would overflow int64_t, so rem accounts for the difference.
 //   int64_t q = absl::IDivDuration(a, b, &rem);
 //   // q == std::numeric_limits<int64_t>::max(), rem == a - b * q
-inline int64_t IDivDuration(Duration num, Duration den, Duration* rem) {
-  return time_internal::IDivDuration(true, num, den,
-                                     rem);  // trunc towards zero
-}
+int64_t IDivDuration(Duration num, Duration den, Duration* rem);
 
 // FDivDuration()
 //
@@ -409,6 +396,30 @@
 //   // d == 1.5
 ABSL_ATTRIBUTE_CONST_FUNCTION double FDivDuration(Duration num, Duration den);
 
+// Multiplicative Operators
+// Integer operands must be representable as int64_t.
+template <typename T>
+ABSL_ATTRIBUTE_CONST_FUNCTION Duration operator*(Duration lhs, T rhs) {
+  return lhs *= rhs;
+}
+template <typename T>
+ABSL_ATTRIBUTE_CONST_FUNCTION Duration operator*(T lhs, Duration rhs) {
+  return rhs *= lhs;
+}
+template <typename T>
+ABSL_ATTRIBUTE_CONST_FUNCTION Duration operator/(Duration lhs, T rhs) {
+  return lhs /= rhs;
+}
+ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t operator/(Duration lhs,
+                                                       Duration rhs) {
+  return IDivDuration(lhs, rhs,
+                      &lhs);  // trunc towards zero
+}
+ABSL_ATTRIBUTE_CONST_FUNCTION inline Duration operator%(Duration lhs,
+                                                        Duration rhs) {
+  return lhs %= rhs;
+}
+
 // ZeroDuration()
 //
 // Returns a zero-length duration. This function behaves just like the default
@@ -841,6 +852,11 @@
  private:
   friend constexpr Time time_internal::FromUnixDuration(Duration d);
   friend constexpr Duration time_internal::ToUnixDuration(Time t);
+
+#ifdef __cpp_impl_three_way_comparison
+  friend constexpr std::strong_ordering operator<=>(Time lhs, Time rhs);
+#endif  // __cpp_impl_three_way_comparison
+
   friend constexpr bool operator<(Time lhs, Time rhs);
   friend constexpr bool operator==(Time lhs, Time rhs);
   friend Duration operator-(Time lhs, Time rhs);
@@ -852,6 +868,15 @@
 };
 
 // Relational Operators
+#ifdef __cpp_impl_three_way_comparison
+
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr std::strong_ordering operator<=>(
+    Time lhs, Time rhs) {
+  return lhs.rep_ <=> rhs.rep_;
+}
+
+#endif  // __cpp_impl_three_way_comparison
+
 ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator<(Time lhs, Time rhs) {
   return lhs.rep_ < rhs.rep_;
 }
@@ -1727,6 +1752,25 @@
              : time_internal::GetRepLo(lhs) < time_internal::GetRepLo(rhs);
 }
 
+
+#ifdef __cpp_impl_three_way_comparison
+
+ABSL_ATTRIBUTE_CONST_FUNCTION constexpr std::strong_ordering operator<=>(
+    Duration lhs, Duration rhs) {
+  const int64_t lhs_hi = time_internal::GetRepHi(lhs);
+  const int64_t rhs_hi = time_internal::GetRepHi(rhs);
+  if (auto c = lhs_hi <=> rhs_hi; c != std::strong_ordering::equal) {
+    return c;
+  }
+  const uint32_t lhs_lo = time_internal::GetRepLo(lhs);
+  const uint32_t rhs_lo = time_internal::GetRepLo(rhs);
+  return (lhs_hi == (std::numeric_limits<int64_t>::min)())
+             ? (lhs_lo + 1) <=> (rhs_lo + 1)
+             : lhs_lo <=> rhs_lo;
+}
+
+#endif  // __cpp_impl_three_way_comparison
+
 ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator==(Duration lhs,
                                                         Duration rhs) {
   return time_internal::GetRepHi(lhs) == time_internal::GetRepHi(rhs) &&
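Editorial sketch (not part of the patch): under C++20 the new operator<=> overloads give absl::Time and absl::Duration a std::strong_ordering, mirroring the static_asserts added to time_test.cc below.

#ifdef __cpp_impl_three_way_comparison
#include <compare>
#include "absl/time/time.h"

static_assert((absl::UnixEpoch() <=> absl::UnixEpoch()) ==
              std::strong_ordering::equal);
static_assert((absl::InfinitePast() <=> absl::InfiniteFuture()) ==
              std::strong_ordering::less);
static_assert((absl::Seconds(2) <=> absl::Seconds(1)) ==
              std::strong_ordering::greater);
static_assert((absl::ZeroDuration() <=> absl::InfiniteDuration()) ==
              std::strong_ordering::less);
#endif  // __cpp_impl_three_way_comparison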
diff --git a/absl/time/time_test.cc b/absl/time/time_test.cc
index bcf4f2a..71f54d6 100644
--- a/absl/time/time_test.cc
+++ b/absl/time/time_test.cc
@@ -14,11 +14,21 @@
 
 #include "absl/time/time.h"
 
+#include <cstdint>
+#include <ios>
+
+#include "absl/time/civil_time.h"
+
 #if defined(_MSC_VER)
 #include <winsock2.h>  // for timeval
 #endif
 
 #include <chrono>  // NOLINT(build/c++11)
+
+#ifdef __cpp_impl_three_way_comparison
+#include <compare>
+#endif  // __cpp_impl_three_way_comparison
+
 #include <cstring>
 #include <ctime>
 #include <iomanip>
@@ -77,21 +87,21 @@
 
 TEST(Time, ConstExpr) {
   constexpr absl::Time t0 = absl::UnixEpoch();
-  static_assert(t0 == absl::Time(), "UnixEpoch");
+  static_assert(t0 == absl::UnixEpoch(), "UnixEpoch");
   constexpr absl::Time t1 = absl::InfiniteFuture();
-  static_assert(t1 != absl::Time(), "InfiniteFuture");
+  static_assert(t1 != absl::UnixEpoch(), "InfiniteFuture");
   constexpr absl::Time t2 = absl::InfinitePast();
-  static_assert(t2 != absl::Time(), "InfinitePast");
+  static_assert(t2 != absl::UnixEpoch(), "InfinitePast");
   constexpr absl::Time t3 = absl::FromUnixNanos(0);
-  static_assert(t3 == absl::Time(), "FromUnixNanos");
+  static_assert(t3 == absl::UnixEpoch(), "FromUnixNanos");
   constexpr absl::Time t4 = absl::FromUnixMicros(0);
-  static_assert(t4 == absl::Time(), "FromUnixMicros");
+  static_assert(t4 == absl::UnixEpoch(), "FromUnixMicros");
   constexpr absl::Time t5 = absl::FromUnixMillis(0);
-  static_assert(t5 == absl::Time(), "FromUnixMillis");
+  static_assert(t5 == absl::UnixEpoch(), "FromUnixMillis");
   constexpr absl::Time t6 = absl::FromUnixSeconds(0);
-  static_assert(t6 == absl::Time(), "FromUnixSeconds");
+  static_assert(t6 == absl::UnixEpoch(), "FromUnixSeconds");
   constexpr absl::Time t7 = absl::FromTimeT(0);
-  static_assert(t7 == absl::Time(), "FromTimeT");
+  static_assert(t7 == absl::UnixEpoch(), "FromTimeT");
 }
 
 TEST(Time, ValueSemantics) {
@@ -176,7 +186,7 @@
   constexpr absl::Time t2 = absl::FromUnixNanos(1);
   constexpr absl::Time t3 = absl::FromUnixNanos(2);
 
-  static_assert(absl::Time() == t1, "");
+  static_assert(absl::UnixEpoch() == t1, "");
   static_assert(t1 == t1, "");
   static_assert(t2 == t2, "");
   static_assert(t3 == t3, "");
@@ -202,6 +212,22 @@
   static_assert(t3 >= t2, "");
   static_assert(t1 >= t1, "");
   static_assert(t3 >= t1, "");
+
+#ifdef __cpp_impl_three_way_comparison
+
+  static_assert((t1 <=> t1) == std::strong_ordering::equal, "");
+  static_assert((t2 <=> t2) == std::strong_ordering::equal, "");
+  static_assert((t3 <=> t3) == std::strong_ordering::equal, "");
+
+  static_assert((t1 <=> t2) == std::strong_ordering::less, "");
+  static_assert((t2 <=> t3) == std::strong_ordering::less, "");
+  static_assert((t1 <=> t3) == std::strong_ordering::less, "");
+
+  static_assert((t2 <=> t1) == std::strong_ordering::greater, "");
+  static_assert((t3 <=> t2) == std::strong_ordering::greater, "");
+  static_assert((t3 <=> t1) == std::strong_ordering::greater, "");
+
+#endif  // __cpp_impl_three_way_comparison
 }
 
 TEST(Time, Infinity) {
@@ -213,6 +239,15 @@
   static_assert(ipast < ifuture, "");
   static_assert(ifuture > ipast, "");
 
+#ifdef __cpp_impl_three_way_comparison
+
+  static_assert((ifuture <=> ifuture) == std::strong_ordering::equal, "");
+  static_assert((ipast <=> ipast) == std::strong_ordering::equal, "");
+  static_assert((ipast <=> ifuture) == std::strong_ordering::less, "");
+  static_assert((ifuture <=> ipast) == std::strong_ordering::greater, "");
+
+#endif  // __cpp_impl_three_way_comparison
+
   // Arithmetic saturates
   EXPECT_EQ(ifuture, ifuture + absl::Seconds(1));
   EXPECT_EQ(ifuture, ifuture - absl::Seconds(1));
@@ -228,6 +263,15 @@
   static_assert(t < ifuture, "");
   static_assert(t > ipast, "");
 
+#ifdef __cpp_impl_three_way_comparison
+
+  static_assert((t <=> ifuture) == std::strong_ordering::less, "");
+  static_assert((t <=> ipast) == std::strong_ordering::greater, "");
+  static_assert((ipast <=> t) == std::strong_ordering::less, "");
+  static_assert((ifuture <=> t) == std::strong_ordering::greater, "");
+
+#endif  // __cpp_impl_three_way_comparison
+
   EXPECT_EQ(ifuture, t + absl::InfiniteDuration());
   EXPECT_EQ(ipast, t - absl::InfiniteDuration());
 }
@@ -255,7 +299,7 @@
   EXPECT_EQ(1, absl::ToUnixNanos(absl::UnixEpoch() + absl::Nanoseconds(3) / 2));
   EXPECT_EQ(1, absl::ToUnixNanos(absl::UnixEpoch() + absl::Nanoseconds(1)));
   EXPECT_EQ(0, absl::ToUnixNanos(absl::UnixEpoch() + absl::Nanoseconds(1) / 2));
-  EXPECT_EQ(0, absl::ToUnixNanos(absl::UnixEpoch() + absl::Nanoseconds(0)));
+  EXPECT_EQ(0, absl::ToUnixNanos(absl::UnixEpoch() + absl::ZeroDuration()));
   EXPECT_EQ(-1,
             absl::ToUnixNanos(absl::UnixEpoch() - absl::Nanoseconds(1) / 2));
   EXPECT_EQ(-1, absl::ToUnixNanos(absl::UnixEpoch() - absl::Nanoseconds(1)));
@@ -272,7 +316,7 @@
   EXPECT_EQ(0,
             absl::ToUniversal(absl::UniversalEpoch() + absl::Nanoseconds(1)));
   EXPECT_EQ(0,
-            absl::ToUniversal(absl::UniversalEpoch() + absl::Nanoseconds(0)));
+            absl::ToUniversal(absl::UniversalEpoch() + absl::ZeroDuration()));
   EXPECT_EQ(-1,
             absl::ToUniversal(absl::UniversalEpoch() + absl::Nanoseconds(-1)));
   EXPECT_EQ(-1,
@@ -289,13 +333,13 @@
   } to_ts[] = {
       {absl::FromUnixSeconds(1) + absl::Nanoseconds(1), {1, 1}},
       {absl::FromUnixSeconds(1) + absl::Nanoseconds(1) / 2, {1, 0}},
-      {absl::FromUnixSeconds(1) + absl::Nanoseconds(0), {1, 0}},
-      {absl::FromUnixSeconds(0) + absl::Nanoseconds(0), {0, 0}},
+      {absl::FromUnixSeconds(1) + absl::ZeroDuration(), {1, 0}},
+      {absl::FromUnixSeconds(0) + absl::ZeroDuration(), {0, 0}},
       {absl::FromUnixSeconds(0) - absl::Nanoseconds(1) / 2, {-1, 999999999}},
       {absl::FromUnixSeconds(0) - absl::Nanoseconds(1), {-1, 999999999}},
       {absl::FromUnixSeconds(-1) + absl::Nanoseconds(1), {-1, 1}},
       {absl::FromUnixSeconds(-1) + absl::Nanoseconds(1) / 2, {-1, 0}},
-      {absl::FromUnixSeconds(-1) + absl::Nanoseconds(0), {-1, 0}},
+      {absl::FromUnixSeconds(-1) + absl::ZeroDuration(), {-1, 0}},
       {absl::FromUnixSeconds(-1) - absl::Nanoseconds(1) / 2, {-2, 999999999}},
   };
   for (const auto& test : to_ts) {
@@ -306,12 +350,12 @@
     absl::Time t;
   } from_ts[] = {
       {{1, 1}, absl::FromUnixSeconds(1) + absl::Nanoseconds(1)},
-      {{1, 0}, absl::FromUnixSeconds(1) + absl::Nanoseconds(0)},
-      {{0, 0}, absl::FromUnixSeconds(0) + absl::Nanoseconds(0)},
+      {{1, 0}, absl::FromUnixSeconds(1) + absl::ZeroDuration()},
+      {{0, 0}, absl::FromUnixSeconds(0) + absl::ZeroDuration()},
       {{0, -1}, absl::FromUnixSeconds(0) - absl::Nanoseconds(1)},
       {{-1, 999999999}, absl::FromUnixSeconds(0) - absl::Nanoseconds(1)},
       {{-1, 1}, absl::FromUnixSeconds(-1) + absl::Nanoseconds(1)},
-      {{-1, 0}, absl::FromUnixSeconds(-1) + absl::Nanoseconds(0)},
+      {{-1, 0}, absl::FromUnixSeconds(-1) + absl::ZeroDuration()},
       {{-1, -1}, absl::FromUnixSeconds(-1) - absl::Nanoseconds(1)},
       {{-2, 999999999}, absl::FromUnixSeconds(-1) - absl::Nanoseconds(1)},
   };
@@ -319,36 +363,36 @@
     EXPECT_EQ(test.t, absl::TimeFromTimespec(test.ts));
   }
 
-  // Tests ToTimeval()/TimeFromTimeval() (same as timespec above)
+  // Tests absl::ToTimeval()/TimeFromTimeval() (same as timespec above)
   const struct {
     absl::Time t;
     timeval tv;
   } to_tv[] = {
       {absl::FromUnixSeconds(1) + absl::Microseconds(1), {1, 1}},
       {absl::FromUnixSeconds(1) + absl::Microseconds(1) / 2, {1, 0}},
-      {absl::FromUnixSeconds(1) + absl::Microseconds(0), {1, 0}},
-      {absl::FromUnixSeconds(0) + absl::Microseconds(0), {0, 0}},
+      {absl::FromUnixSeconds(1) + absl::ZeroDuration(), {1, 0}},
+      {absl::FromUnixSeconds(0) + absl::ZeroDuration(), {0, 0}},
       {absl::FromUnixSeconds(0) - absl::Microseconds(1) / 2, {-1, 999999}},
       {absl::FromUnixSeconds(0) - absl::Microseconds(1), {-1, 999999}},
       {absl::FromUnixSeconds(-1) + absl::Microseconds(1), {-1, 1}},
       {absl::FromUnixSeconds(-1) + absl::Microseconds(1) / 2, {-1, 0}},
-      {absl::FromUnixSeconds(-1) + absl::Microseconds(0), {-1, 0}},
+      {absl::FromUnixSeconds(-1) + absl::ZeroDuration(), {-1, 0}},
       {absl::FromUnixSeconds(-1) - absl::Microseconds(1) / 2, {-2, 999999}},
   };
   for (const auto& test : to_tv) {
-    EXPECT_THAT(ToTimeval(test.t), TimevalMatcher(test.tv));
+    EXPECT_THAT(absl::ToTimeval(test.t), TimevalMatcher(test.tv));
   }
   const struct {
     timeval tv;
     absl::Time t;
   } from_tv[] = {
       {{1, 1}, absl::FromUnixSeconds(1) + absl::Microseconds(1)},
-      {{1, 0}, absl::FromUnixSeconds(1) + absl::Microseconds(0)},
-      {{0, 0}, absl::FromUnixSeconds(0) + absl::Microseconds(0)},
+      {{1, 0}, absl::FromUnixSeconds(1) + absl::ZeroDuration()},
+      {{0, 0}, absl::FromUnixSeconds(0) + absl::ZeroDuration()},
       {{0, -1}, absl::FromUnixSeconds(0) - absl::Microseconds(1)},
       {{-1, 999999}, absl::FromUnixSeconds(0) - absl::Microseconds(1)},
       {{-1, 1}, absl::FromUnixSeconds(-1) + absl::Microseconds(1)},
-      {{-1, 0}, absl::FromUnixSeconds(-1) + absl::Microseconds(0)},
+      {{-1, 0}, absl::FromUnixSeconds(-1) + absl::ZeroDuration()},
       {{-1, -1}, absl::FromUnixSeconds(-1) - absl::Microseconds(1)},
       {{-2, 999999}, absl::FromUnixSeconds(-1) - absl::Microseconds(1)},
   };
@@ -438,7 +482,7 @@
                              testing::Eq)
       << now_time_t;
 
-  // TimeFromTimeval() and ToTimeval()
+  // TimeFromTimeval() and absl::ToTimeval()
   timeval tv;
   tv.tv_sec = -1;
   tv.tv_usec = 0;
@@ -723,14 +767,14 @@
 TEST(Time, ToTM) {
   const absl::TimeZone utc = absl::UTCTimeZone();
 
-  // Compares the results of ToTM() to gmtime_r() for lots of times over the
-  // course of a few days.
+  // Compares the results of absl::ToTM() to gmtime_r() for lots of times over
+  // the course of a few days.
   const absl::Time start =
       absl::FromCivil(absl::CivilSecond(2014, 1, 2, 3, 4, 5), utc);
   const absl::Time end =
       absl::FromCivil(absl::CivilSecond(2014, 1, 5, 3, 4, 5), utc);
   for (absl::Time t = start; t < end; t += absl::Seconds(30)) {
-    const struct tm tm_bt = ToTM(t, utc);
+    const struct tm tm_bt = absl::ToTM(t, utc);
     const time_t tt = absl::ToTimeT(t);
     struct tm tm_lc;
 #ifdef _WIN32
@@ -755,16 +799,16 @@
   const absl::TimeZone nyc =
       absl::time_internal::LoadTimeZone("America/New_York");
   absl::Time t = absl::FromCivil(absl::CivilSecond(2014, 3, 1, 0, 0, 0), nyc);
-  struct tm tm = ToTM(t, nyc);
+  struct tm tm = absl::ToTM(t, nyc);
   EXPECT_FALSE(tm.tm_isdst);
 
   // Checks that the tm_isdst field is correct when in daylight time.
   t = absl::FromCivil(absl::CivilSecond(2014, 4, 1, 0, 0, 0), nyc);
-  tm = ToTM(t, nyc);
+  tm = absl::ToTM(t, nyc);
   EXPECT_TRUE(tm.tm_isdst);
 
   // Checks overflow.
-  tm = ToTM(absl::InfiniteFuture(), nyc);
+  tm = absl::ToTM(absl::InfiniteFuture(), nyc);
   EXPECT_EQ(std::numeric_limits<int>::max() - 1900, tm.tm_year);
   EXPECT_EQ(11, tm.tm_mon);
   EXPECT_EQ(31, tm.tm_mday);
@@ -776,7 +820,7 @@
   EXPECT_FALSE(tm.tm_isdst);
 
   // Checks underflow.
-  tm = ToTM(absl::InfinitePast(), nyc);
+  tm = absl::ToTM(absl::InfinitePast(), nyc);
   EXPECT_EQ(std::numeric_limits<int>::min(), tm.tm_year);
   EXPECT_EQ(0, tm.tm_mon);
   EXPECT_EQ(1, tm.tm_mday);
@@ -802,13 +846,13 @@
   tm.tm_min = 2;
   tm.tm_sec = 3;
   tm.tm_isdst = -1;
-  absl::Time t = FromTM(tm, nyc);
+  absl::Time t = absl::FromTM(tm, nyc);
   EXPECT_EQ("2014-06-28T01:02:03-04:00", absl::FormatTime(t, nyc));  // DST
   tm.tm_isdst = 0;
-  t = FromTM(tm, nyc);
+  t = absl::FromTM(tm, nyc);
   EXPECT_EQ("2014-06-28T01:02:03-04:00", absl::FormatTime(t, nyc));  // DST
   tm.tm_isdst = 1;
-  t = FromTM(tm, nyc);
+  t = absl::FromTM(tm, nyc);
   EXPECT_EQ("2014-06-28T01:02:03-04:00", absl::FormatTime(t, nyc));  // DST
 
   // Adjusts tm to refer to an ambiguous time.
@@ -819,13 +863,13 @@
   tm.tm_min = 30;
   tm.tm_sec = 42;
   tm.tm_isdst = -1;
-  t = FromTM(tm, nyc);
+  t = absl::FromTM(tm, nyc);
   EXPECT_EQ("2014-11-02T01:30:42-04:00", absl::FormatTime(t, nyc));  // DST
   tm.tm_isdst = 0;
-  t = FromTM(tm, nyc);
+  t = absl::FromTM(tm, nyc);
   EXPECT_EQ("2014-11-02T01:30:42-05:00", absl::FormatTime(t, nyc));  // STD
   tm.tm_isdst = 1;
-  t = FromTM(tm, nyc);
+  t = absl::FromTM(tm, nyc);
   EXPECT_EQ("2014-11-02T01:30:42-04:00", absl::FormatTime(t, nyc));  // DST
 
   // Adjusts tm to refer to a skipped time.
@@ -836,13 +880,13 @@
   tm.tm_min = 30;
   tm.tm_sec = 42;
   tm.tm_isdst = -1;
-  t = FromTM(tm, nyc);
+  t = absl::FromTM(tm, nyc);
   EXPECT_EQ("2014-03-09T03:30:42-04:00", absl::FormatTime(t, nyc));  // DST
   tm.tm_isdst = 0;
-  t = FromTM(tm, nyc);
+  t = absl::FromTM(tm, nyc);
   EXPECT_EQ("2014-03-09T01:30:42-05:00", absl::FormatTime(t, nyc));  // STD
   tm.tm_isdst = 1;
-  t = FromTM(tm, nyc);
+  t = absl::FromTM(tm, nyc);
   EXPECT_EQ("2014-03-09T03:30:42-04:00", absl::FormatTime(t, nyc));  // DST
 
   // Adjusts tm to refer to a time with a year larger than 2147483647.
@@ -853,7 +897,7 @@
   tm.tm_min = 2;
   tm.tm_sec = 3;
   tm.tm_isdst = -1;
-  t = FromTM(tm, absl::UTCTimeZone());
+  t = absl::FromTM(tm, absl::UTCTimeZone());
   EXPECT_EQ("2147483648-06-28T01:02:03+00:00",
             absl::FormatTime(t, absl::UTCTimeZone()));
 
@@ -865,7 +909,7 @@
   tm.tm_min = 2;
   tm.tm_sec = 3;
   tm.tm_isdst = -1;
-  t = FromTM(tm, absl::UTCTimeZone());
+  t = absl::FromTM(tm, absl::UTCTimeZone());
   EXPECT_EQ("178958989-08-28T01:02:03+00:00",
             absl::FormatTime(t, absl::UTCTimeZone()));
 }
@@ -878,8 +922,8 @@
   absl::Time start = absl::FromCivil(absl::CivilHour(2014, 3, 9, 0), nyc);
   absl::Time end = absl::FromCivil(absl::CivilHour(2014, 3, 9, 4), nyc);
   for (absl::Time t = start; t < end; t += absl::Minutes(1)) {
-    struct tm tm = ToTM(t, nyc);
-    absl::Time rt = FromTM(tm, nyc);
+    struct tm tm = absl::ToTM(t, nyc);
+    absl::Time rt = absl::FromTM(tm, nyc);
     EXPECT_EQ(rt, t);
   }
 
@@ -887,8 +931,8 @@
   start = absl::FromCivil(absl::CivilHour(2014, 11, 2, 0), nyc);
   end = absl::FromCivil(absl::CivilHour(2014, 11, 2, 4), nyc);
   for (absl::Time t = start; t < end; t += absl::Minutes(1)) {
-    struct tm tm = ToTM(t, nyc);
-    absl::Time rt = FromTM(tm, nyc);
+    struct tm tm = absl::ToTM(t, nyc);
+    absl::Time rt = absl::FromTM(tm, nyc);
     EXPECT_EQ(rt, t);
   }
 
@@ -896,8 +940,8 @@
   start = absl::FromCivil(absl::CivilHour(2014, 6, 27, 22), nyc);
   end = absl::FromCivil(absl::CivilHour(2014, 6, 28, 4), nyc);
   for (absl::Time t = start; t < end; t += absl::Minutes(1)) {
-    struct tm tm = ToTM(t, nyc);
-    absl::Time rt = FromTM(tm, nyc);
+    struct tm tm = absl::ToTM(t, nyc);
+    absl::Time rt = absl::FromTM(tm, nyc);
     EXPECT_EQ(rt, t);
   }
 }
@@ -985,30 +1029,30 @@
   tv.tv_sec = max_timeval_sec;
   tv.tv_usec = 999998;
   t = absl::TimeFromTimeval(tv);
-  tv = ToTimeval(t);
+  tv = absl::ToTimeval(t);
   EXPECT_EQ(max_timeval_sec, tv.tv_sec);
   EXPECT_EQ(999998, tv.tv_usec);
   t += absl::Microseconds(1);
-  tv = ToTimeval(t);
+  tv = absl::ToTimeval(t);
   EXPECT_EQ(max_timeval_sec, tv.tv_sec);
   EXPECT_EQ(999999, tv.tv_usec);
   t += absl::Microseconds(1);  // no effect
-  tv = ToTimeval(t);
+  tv = absl::ToTimeval(t);
   EXPECT_EQ(max_timeval_sec, tv.tv_sec);
   EXPECT_EQ(999999, tv.tv_usec);
 
   tv.tv_sec = min_timeval_sec;
   tv.tv_usec = 1;
   t = absl::TimeFromTimeval(tv);
-  tv = ToTimeval(t);
+  tv = absl::ToTimeval(t);
   EXPECT_EQ(min_timeval_sec, tv.tv_sec);
   EXPECT_EQ(1, tv.tv_usec);
   t -= absl::Microseconds(1);
-  tv = ToTimeval(t);
+  tv = absl::ToTimeval(t);
   EXPECT_EQ(min_timeval_sec, tv.tv_sec);
   EXPECT_EQ(0, tv.tv_usec);
   t -= absl::Microseconds(1);  // no effect
-  tv = ToTimeval(t);
+  tv = absl::ToTimeval(t);
   EXPECT_EQ(min_timeval_sec, tv.tv_sec);
   EXPECT_EQ(0, tv.tv_usec);
 
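Editorial sketch (not part of the patch): the conversions the tests above now spell as absl::ToTimeval()/absl::TimeFromTimeval(), plus their timespec counterparts; timeval typically needs <sys/time.h>, or <winsock2.h> on Windows as the test's includes show.

#include <ctime>       // timespec
#include <sys/time.h>  // timeval (POSIX; see the Windows include in the test)
#include "absl/time/time.h"

void TimevalRoundTripSketch() {
  const absl::Time t = absl::FromUnixSeconds(1) + absl::Nanoseconds(1);

  timespec ts = absl::ToTimespec(t);                // tv_sec == 1, tv_nsec == 1
  absl::Time from_ts = absl::TimeFromTimespec(ts);  // == t

  timeval tv = absl::ToTimeval(t);                  // microsecond precision: {1, 0}
  absl::Time from_tv = absl::TimeFromTimeval(tv);   // == absl::FromUnixSeconds(1)
  (void)from_ts; (void)from_tv;
}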
diff --git a/absl/types/BUILD.bazel b/absl/types/BUILD.bazel
index ce8f605..a86e2c1 100644
--- a/absl/types/BUILD.bazel
+++ b/absl/types/BUILD.bazel
@@ -138,6 +138,7 @@
         "//absl/container:fixed_array",
         "//absl/container:inlined_vector",
         "//absl/hash:hash_testing",
+        "//absl/meta:type_traits",
         "//absl/strings",
         "@com_google_googletest//:gtest",
         "@com_google_googletest//:gtest_main",
diff --git a/absl/types/CMakeLists.txt b/absl/types/CMakeLists.txt
index 92b4ae4..fed532f 100644
--- a/absl/types/CMakeLists.txt
+++ b/absl/types/CMakeLists.txt
@@ -138,6 +138,7 @@
     absl::inlined_vector
     absl::hash_testing
     absl::strings
+    absl::type_traits
     GTest::gmock_main
 )
 
diff --git a/absl/types/internal/optional.h b/absl/types/internal/optional.h
index a96d260..5731a5b 100644
--- a/absl/types/internal/optional.h
+++ b/absl/types/internal/optional.h
@@ -81,7 +81,7 @@
 
   template <typename... Args>
   constexpr explicit optional_data_dtor_base(in_place_t, Args&&... args)
-      : engaged_(true), data_(absl::forward<Args>(args)...) {}
+      : engaged_(true), data_(std::forward<Args>(args)...) {}
 
   ~optional_data_dtor_base() { destruct(); }
 };
@@ -110,7 +110,7 @@
 
   template <typename... Args>
   constexpr explicit optional_data_dtor_base(in_place_t, Args&&... args)
-      : engaged_(true), data_(absl::forward<Args>(args)...) {}
+      : engaged_(true), data_(std::forward<Args>(args)...) {}
 };
 
 template <typename T>
diff --git a/absl/types/internal/variant.h b/absl/types/internal/variant.h
index 263d7b0..4cb15f2 100644
--- a/absl/types/internal/variant.h
+++ b/absl/types/internal/variant.h
@@ -26,6 +26,7 @@
 #include <stdexcept>
 #include <tuple>
 #include <type_traits>
+#include <utility>
 
 #include "absl/base/config.h"
 #include "absl/base/internal/identity.h"
@@ -214,7 +215,7 @@
       std::is_same<ReturnType, decltype(std::declval<FunctionObject>()(
                                    SizeT<Indices>()...))>::value,
       "Not all visitation overloads have the same return type.");
-  return absl::forward<FunctionObject>(function)(SizeT<Indices>()...);
+  return std::forward<FunctionObject>(function)(SizeT<Indices>()...);
 }
 
 template <class ReturnType, class FunctionObject, std::size_t... BoundIndices>
@@ -272,27 +273,14 @@
   template <class Op>
   [[noreturn]] static VisitIndicesResultT<Op, std::size_t> Run(
       Op&& /*ignored*/) {
-#if ABSL_HAVE_BUILTIN(__builtin_unreachable) || \
-    (defined(__GNUC__) && !defined(__clang__))
-    __builtin_unreachable();
-#elif defined(_MSC_VER)
-    __assume(false);
-#else
-    // Try to use assert of false being identified as an unreachable intrinsic.
-    // NOTE: We use assert directly to increase chances of exploiting an assume
-    //       intrinsic.
-    assert(false);  // NOLINT
-
-    // Hack to silence potential no return warning -- cause an infinite loop.
-    return Run(absl::forward<Op>(op));
-#endif  // Checks for __builtin_unreachable
+    ABSL_UNREACHABLE();
   }
 };
 
 template <class Op, std::size_t I>
 struct ReachableSwitchCase {
   static VisitIndicesResultT<Op, std::size_t> Run(Op&& op) {
-    return absl::base_internal::invoke(absl::forward<Op>(op), SizeT<I>());
+    return absl::base_internal::invoke(std::forward<Op>(op), SizeT<I>());
   }
 };
 
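Editorial note (not part of the patch): the deleted per-compiler fallback is now centralized in ABSL_UNREACHABLE(). A condensed sketch of the same idea follows; the macro name is illustrative, not absl's actual definition.

#include <cassert>

// Condensed sketch of the logic the deleted block implemented by hand.
#if defined(__GNUC__) || defined(__clang__)
#define UNREACHABLE_SKETCH() __builtin_unreachable()
#elif defined(_MSC_VER)
#define UNREACHABLE_SKETCH() __assume(false)
#else
#define UNREACHABLE_SKETCH() assert(false)  // best-effort hint to the optimizer
#endif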
@@ -357,74 +345,74 @@
   static VisitIndicesResultT<Op, std::size_t> Run(Op&& op, std::size_t i) {
     switch (i) {
       case 0:
-        return PickCase<Op, 0, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 0, EndIndex>::Run(std::forward<Op>(op));
       case 1:
-        return PickCase<Op, 1, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 1, EndIndex>::Run(std::forward<Op>(op));
       case 2:
-        return PickCase<Op, 2, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 2, EndIndex>::Run(std::forward<Op>(op));
       case 3:
-        return PickCase<Op, 3, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 3, EndIndex>::Run(std::forward<Op>(op));
       case 4:
-        return PickCase<Op, 4, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 4, EndIndex>::Run(std::forward<Op>(op));
       case 5:
-        return PickCase<Op, 5, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 5, EndIndex>::Run(std::forward<Op>(op));
       case 6:
-        return PickCase<Op, 6, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 6, EndIndex>::Run(std::forward<Op>(op));
       case 7:
-        return PickCase<Op, 7, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 7, EndIndex>::Run(std::forward<Op>(op));
       case 8:
-        return PickCase<Op, 8, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 8, EndIndex>::Run(std::forward<Op>(op));
       case 9:
-        return PickCase<Op, 9, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 9, EndIndex>::Run(std::forward<Op>(op));
       case 10:
-        return PickCase<Op, 10, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 10, EndIndex>::Run(std::forward<Op>(op));
       case 11:
-        return PickCase<Op, 11, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 11, EndIndex>::Run(std::forward<Op>(op));
       case 12:
-        return PickCase<Op, 12, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 12, EndIndex>::Run(std::forward<Op>(op));
       case 13:
-        return PickCase<Op, 13, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 13, EndIndex>::Run(std::forward<Op>(op));
       case 14:
-        return PickCase<Op, 14, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 14, EndIndex>::Run(std::forward<Op>(op));
       case 15:
-        return PickCase<Op, 15, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 15, EndIndex>::Run(std::forward<Op>(op));
       case 16:
-        return PickCase<Op, 16, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 16, EndIndex>::Run(std::forward<Op>(op));
       case 17:
-        return PickCase<Op, 17, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 17, EndIndex>::Run(std::forward<Op>(op));
       case 18:
-        return PickCase<Op, 18, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 18, EndIndex>::Run(std::forward<Op>(op));
       case 19:
-        return PickCase<Op, 19, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 19, EndIndex>::Run(std::forward<Op>(op));
       case 20:
-        return PickCase<Op, 20, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 20, EndIndex>::Run(std::forward<Op>(op));
       case 21:
-        return PickCase<Op, 21, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 21, EndIndex>::Run(std::forward<Op>(op));
       case 22:
-        return PickCase<Op, 22, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 22, EndIndex>::Run(std::forward<Op>(op));
       case 23:
-        return PickCase<Op, 23, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 23, EndIndex>::Run(std::forward<Op>(op));
       case 24:
-        return PickCase<Op, 24, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 24, EndIndex>::Run(std::forward<Op>(op));
       case 25:
-        return PickCase<Op, 25, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 25, EndIndex>::Run(std::forward<Op>(op));
       case 26:
-        return PickCase<Op, 26, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 26, EndIndex>::Run(std::forward<Op>(op));
       case 27:
-        return PickCase<Op, 27, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 27, EndIndex>::Run(std::forward<Op>(op));
       case 28:
-        return PickCase<Op, 28, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 28, EndIndex>::Run(std::forward<Op>(op));
       case 29:
-        return PickCase<Op, 29, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 29, EndIndex>::Run(std::forward<Op>(op));
       case 30:
-        return PickCase<Op, 30, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 30, EndIndex>::Run(std::forward<Op>(op));
       case 31:
-        return PickCase<Op, 31, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 31, EndIndex>::Run(std::forward<Op>(op));
       case 32:
-        return PickCase<Op, 32, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 32, EndIndex>::Run(std::forward<Op>(op));
       default:
         ABSL_ASSERT(i == variant_npos);
-        return absl::base_internal::invoke(absl::forward<Op>(op), NPos());
+        return absl::base_internal::invoke(std::forward<Op>(op), NPos());
     }
   }
 };
@@ -437,7 +425,7 @@
         MakeVisitationMatrix<VisitIndicesResultT<Op, SizeT...>, Op,
                              index_sequence<(EndIndices + 1)...>,
                              index_sequence<>>::Run(),
-        (indices + 1)...)(absl::forward<Op>(op));
+        (indices + 1)...)(std::forward<Op>(op));
   }
 };
 
@@ -489,7 +477,7 @@
     VisitIndicesResultT<Op, decltype(EndIndices)...> operator()(
         SizeT<I> /*index*/) && {
       return base_internal::invoke(
-          absl::forward<Op>(op),
+          std::forward<Op>(op),
           SizeT<UnflattenIndex<I, N, (EndIndices + 1)...>::value -
                 std::size_t{1}>()...);
     }
@@ -501,7 +489,7 @@
   static VisitIndicesResultT<Op, decltype(EndIndices)...> Run(Op&& op,
                                                               SizeType... i) {
     return VisitIndicesSwitch<NumCasesOfSwitch<EndIndices...>::value>::Run(
-        FlattenedOp<Op>{absl::forward<Op>(op)},
+        FlattenedOp<Op>{std::forward<Op>(op)},
         FlattenIndices<(EndIndices + std::size_t{1})...>::Run(
             (i + std::size_t{1})...));
   }
@@ -612,7 +600,7 @@
       TypedThrowBadVariantAccess<VariantAccessResult<I, Variant>>();
     }
 
-    return Access<I>(absl::forward<Variant>(self));
+    return Access<I>(std::forward<Variant>(self));
   }
 
   // The implementation of the move-assignment operation for a variant.
@@ -684,7 +672,7 @@
 
     void operator()(SizeT<NewIndex::value> /*old_i*/
     ) const {
-      Access<NewIndex::value>(*left) = absl::forward<QualifiedNew>(other);
+      Access<NewIndex::value>(*left) = std::forward<QualifiedNew>(other);
     }
 
     template <std::size_t OldIndex>
@@ -695,13 +683,13 @@
       if (std::is_nothrow_constructible<New, QualifiedNew>::value ||
           !std::is_nothrow_move_constructible<New>::value) {
         left->template emplace<NewIndex::value>(
-            absl::forward<QualifiedNew>(other));
+            std::forward<QualifiedNew>(other));
       } else {
         // the standard says "equivalent to
         // operator=(variant(std::forward<T>(t)))", but we use `emplace` here
         // because the variant's move assignment operator could be deleted.
         left->template emplace<NewIndex::value>(
-            New(absl::forward<QualifiedNew>(other)));
+            New(std::forward<QualifiedNew>(other)));
       }
     }
 
@@ -712,7 +700,7 @@
   template <class Left, class QualifiedNew>
   static ConversionAssignVisitor<Left, QualifiedNew>
   MakeConversionAssignVisitor(Left* left, QualifiedNew&& qual) {
-    return {left, absl::forward<QualifiedNew>(qual)};
+    return {left, std::forward<QualifiedNew>(qual)};
   }
 
   // Backend for operations for `emplace()` which destructs `*self` then
@@ -723,7 +711,7 @@
     Destroy(*self);
     using New = typename absl::variant_alternative<NewIndex, Self>::type;
     New* const result = ::new (static_cast<void*>(&self->state_))
-        New(absl::forward<Args>(args)...);
+        New(std::forward<Args>(args)...);
     self->index_ = NewIndex;
     return *result;
   }
@@ -919,9 +907,9 @@
                                           Is, QualifiedVariants>...)>>::value,
         "All visitation overloads must have the same return type.");
     return absl::base_internal::invoke(
-        absl::forward<Op>(op),
+        std::forward<Op>(op),
         VariantCoreAccess::Access<Is>(
-            absl::forward<QualifiedVariants>(std::get<TupIs>(variant_tup)))...);
+            std::forward<QualifiedVariants>(std::get<TupIs>(variant_tup)))...);
   }
 
   template <std::size_t... TupIs, std::size_t... Is>
@@ -969,11 +957,11 @@
 
   template <class... P>
   explicit constexpr Union(EmplaceTag<0>, P&&... args)
-      : head(absl::forward<P>(args)...) {}
+      : head(std::forward<P>(args)...) {}
 
   template <std::size_t I, class... P>
   explicit constexpr Union(EmplaceTag<I>, P&&... args)
-      : tail(EmplaceTag<I - 1>{}, absl::forward<P>(args)...) {}
+      : tail(EmplaceTag<I - 1>{}, std::forward<P>(args)...) {}
 
   Head head;
   TailUnion tail;
@@ -1001,11 +989,11 @@
 
   template <class... P>
   explicit constexpr DestructibleUnionImpl(EmplaceTag<0>, P&&... args)
-      : head(absl::forward<P>(args)...) {}
+      : head(std::forward<P>(args)...) {}
 
   template <std::size_t I, class... P>
   explicit constexpr DestructibleUnionImpl(EmplaceTag<I>, P&&... args)
-      : tail(EmplaceTag<I - 1>{}, absl::forward<P>(args)...) {}
+      : tail(EmplaceTag<I - 1>{}, std::forward<P>(args)...) {}
 
   ~DestructibleUnionImpl() {}
 
@@ -1036,7 +1024,7 @@
 
   template <std::size_t I, class... P>
   explicit constexpr VariantStateBase(EmplaceTag<I> tag, P&&... args)
-      : state_(tag, absl::forward<P>(args)...), index_(I) {}
+      : state_(tag, std::forward<P>(args)...), index_(I) {}
 
   explicit constexpr VariantStateBase(NoopConstructorTag)
       : state_(NoopConstructorTag()), index_(variant_npos) {}
@@ -1321,7 +1309,7 @@
       using Alternative =
           typename absl::variant_alternative<I, variant<T...>>::type;
       ::new (static_cast<void*>(&self->state_)) Alternative(
-          variant_internal::AccessUnion(absl::move(other->state_), i));
+          variant_internal::AccessUnion(std::move(other->state_), i));
     }
 
     void operator()(SizeT<absl::variant_npos> /*i*/) const {}
diff --git a/absl/types/optional.h b/absl/types/optional.h
index 395fe62..cf7249c 100644
--- a/absl/types/optional.h
+++ b/absl/types/optional.h
@@ -151,7 +151,7 @@
                 std::is_same<InPlaceT, in_place_t>,
                 std::is_constructible<T, Args&&...> >::value>* = nullptr>
   constexpr explicit optional(InPlaceT, Args&&... args)
-      : data_base(in_place_t(), absl::forward<Args>(args)...) {}
+      : data_base(in_place_t(), std::forward<Args>(args)...) {}
 
   // Constructs a non-empty `optional` direct-initialized value of type `T` from
   // the arguments of an initializer_list and `std::forward<Args>(args)...`.
@@ -162,8 +162,7 @@
                 T, std::initializer_list<U>&, Args&&...>::value>::type>
   constexpr explicit optional(in_place_t, std::initializer_list<U> il,
                               Args&&... args)
-      : data_base(in_place_t(), il, absl::forward<Args>(args)...) {
-  }
+      : data_base(in_place_t(), il, std::forward<Args>(args)...) {}
 
   // Value constructor (implicit)
   template <
@@ -176,21 +175,21 @@
                             std::is_convertible<U&&, T>,
                             std::is_constructible<T, U&&> >::value,
           bool>::type = false>
-  constexpr optional(U&& v) : data_base(in_place_t(), absl::forward<U>(v)) {}
+  constexpr optional(U&& v) : data_base(in_place_t(), std::forward<U>(v)) {}
 
   // Value constructor (explicit)
   template <
       typename U = T,
       typename std::enable_if<
           absl::conjunction<absl::negation<std::is_same<
-                                in_place_t, typename std::decay<U>::type>>,
+                                in_place_t, typename std::decay<U>::type> >,
                             absl::negation<std::is_same<
-                                optional<T>, typename std::decay<U>::type>>,
-                            absl::negation<std::is_convertible<U&&, T>>,
-                            std::is_constructible<T, U&&>>::value,
+                                optional<T>, typename std::decay<U>::type> >,
+                            absl::negation<std::is_convertible<U&&, T> >,
+                            std::is_constructible<T, U&&> >::value,
           bool>::type = false>
   explicit constexpr optional(U&& v)
-      : data_base(in_place_t(), absl::forward<U>(v)) {}
+      : data_base(in_place_t(), std::forward<U>(v)) {}
 
   // Converting copy constructor (implicit)
   template <typename U,
@@ -437,7 +436,7 @@
     return reference();
   }
   constexpr const T&& operator*() const&& ABSL_ATTRIBUTE_LIFETIME_BOUND {
-    return ABSL_HARDENING_ASSERT(this->engaged_), absl::move(reference());
+    return ABSL_HARDENING_ASSERT(this->engaged_), std::move(reference());
   }
   T&& operator*() && ABSL_ATTRIBUTE_LIFETIME_BOUND {
     ABSL_HARDENING_ASSERT(this->engaged_);
@@ -492,7 +491,7 @@
   }
   constexpr const T&& value()
       const&& ABSL_ATTRIBUTE_LIFETIME_BOUND {  // NOLINT(build/c++11)
-    return absl::move(
+    return std::move(
         static_cast<bool>(*this)
             ? reference()
             : (optional_internal::throw_bad_optional_access(), reference()));
@@ -511,9 +510,8 @@
                   "optional<T>::value_or: T must be copy constructible");
     static_assert(std::is_convertible<U&&, value_type>::value,
                   "optional<T>::value_or: U must be convertible to T");
-    return static_cast<bool>(*this)
-               ? **this
-               : static_cast<T>(absl::forward<U>(v));
+    return static_cast<bool>(*this) ? **this
+                                    : static_cast<T>(std::forward<U>(v));
   }
   template <typename U>
   T value_or(U&& v) && {  // NOLINT(build/c++11)
@@ -573,19 +571,18 @@
 //   static_assert(opt.value() == 1, "");
 template <typename T>
 constexpr optional<typename std::decay<T>::type> make_optional(T&& v) {
-  return optional<typename std::decay<T>::type>(absl::forward<T>(v));
+  return optional<typename std::decay<T>::type>(std::forward<T>(v));
 }
 
 template <typename T, typename... Args>
 constexpr optional<T> make_optional(Args&&... args) {
-  return optional<T>(in_place_t(), absl::forward<Args>(args)...);
+  return optional<T>(in_place_t(), std::forward<Args>(args)...);
 }
 
 template <typename T, typename U, typename... Args>
 constexpr optional<T> make_optional(std::initializer_list<U> il,
                                     Args&&... args) {
-  return optional<T>(in_place_t(), il,
-                     absl::forward<Args>(args)...);
+  return optional<T>(in_place_t(), il, std::forward<Args>(args)...);
 }
 
 // Relational operators [optional.relops]
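Editorial usage sketch (not part of the patch) for the make_optional() overloads above; all three now forward their arguments with std::forward.

#include <string>
#include <vector>
#include "absl/types/optional.h"

void MakeOptionalSketch() {
  auto a = absl::make_optional(42);                   // optional<int>, value form
  auto b = absl::make_optional<std::string>(3, 'x');  // "xxx", in-place args
  auto c = absl::make_optional<std::vector<int>>({1, 2, 3});  // initializer_list form
  (void)a; (void)b; (void)c;
}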
diff --git a/absl/types/optional_test.cc b/absl/types/optional_test.cc
index 5da297b..115e20c 100644
--- a/absl/types/optional_test.cc
+++ b/absl/types/optional_test.cc
@@ -982,37 +982,6 @@
   static_assert((*opt1).x == ConstexprType::kCtorInt, "");
 }
 
-// gcc has a bug pre 4.9.1 where it doesn't do correct overload resolution
-// when overloads are const-qualified and *this is an raluve.
-// Skip that test to make the build green again when using the old compiler.
-// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=59296 is fixed in 4.9.1.
-#if defined(__GNUC__) && !defined(__clang__)
-#define GCC_VERSION \
-  (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
-#if GCC_VERSION < 40901
-#define ABSL_SKIP_OVERLOAD_TEST_DUE_TO_GCC_BUG
-#endif
-#endif
-
-// MSVC has a bug with "cv-qualifiers in class construction", fixed in 2017. See
-// https://docs.microsoft.com/en-us/cpp/cpp-conformance-improvements-2017#bug-fixes
-// The compiler some incorrectly ignores the cv-qualifier when generating a
-// class object via a constructor call. For example:
-//
-// class optional {
-//   constexpr T&& value() &&;
-//   constexpr const T&& value() const &&;
-// }
-//
-// using COI = const absl::optional<int>;
-// static_assert(2 == COI(2).value(), "");  // const &&
-//
-// This should invoke the "const &&" overload but since it ignores the const
-// qualifier it finds the "&&" overload the best candidate.
-#if defined(_MSC_VER) && _MSC_VER < 1910
-#define ABSL_SKIP_OVERLOAD_TEST_DUE_TO_MSVC_BUG
-#endif
-
 TEST(optionalTest, Value) {
   using O = absl::optional<std::string>;
   using CO = const absl::optional<std::string>;
@@ -1025,17 +994,12 @@
   EXPECT_EQ("lvalue_c", lvalue_c.value());
   EXPECT_EQ("xvalue", O(absl::in_place, "xvalue").value());
   EXPECT_EQ("xvalue_c", OC(absl::in_place, "xvalue_c").value());
-#ifndef ABSL_SKIP_OVERLOAD_TEST_DUE_TO_GCC_BUG
   EXPECT_EQ("cxvalue", CO(absl::in_place, "cxvalue").value());
-#endif
   EXPECT_EQ("&", TypeQuals(lvalue.value()));
   EXPECT_EQ("c&", TypeQuals(clvalue.value()));
   EXPECT_EQ("c&", TypeQuals(lvalue_c.value()));
   EXPECT_EQ("&&", TypeQuals(O(absl::in_place, "xvalue").value()));
-#if !defined(ABSL_SKIP_OVERLOAD_TEST_DUE_TO_MSVC_BUG) && \
-    !defined(ABSL_SKIP_OVERLOAD_TEST_DUE_TO_GCC_BUG)
   EXPECT_EQ("c&&", TypeQuals(CO(absl::in_place, "cxvalue").value()));
-#endif
   EXPECT_EQ("c&&", TypeQuals(OC(absl::in_place, "xvalue_c").value()));
 
 #if !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
@@ -1059,7 +1023,7 @@
   // test constexpr value()
   constexpr absl::optional<int> o1(1);
   static_assert(1 == o1.value(), "");  // const &
-#if !defined(_MSC_VER) && !defined(ABSL_SKIP_OVERLOAD_TEST_DUE_TO_GCC_BUG)
+#ifndef _MSC_VER
   using COI = const absl::optional<int>;
   static_assert(2 == COI(2).value(), "");  // const &&
 #endif
@@ -1077,16 +1041,11 @@
   EXPECT_EQ("lvalue_c", *lvalue_c);
   EXPECT_EQ("xvalue", *O(absl::in_place, "xvalue"));
   EXPECT_EQ("xvalue_c", *OC(absl::in_place, "xvalue_c"));
-#ifndef ABSL_SKIP_OVERLOAD_TEST_DUE_TO_GCC_BUG
   EXPECT_EQ("cxvalue", *CO(absl::in_place, "cxvalue"));
-#endif
   EXPECT_EQ("&", TypeQuals(*lvalue));
   EXPECT_EQ("c&", TypeQuals(*clvalue));
   EXPECT_EQ("&&", TypeQuals(*O(absl::in_place, "xvalue")));
-#if !defined(ABSL_SKIP_OVERLOAD_TEST_DUE_TO_MSVC_BUG) && \
-    !defined(ABSL_SKIP_OVERLOAD_TEST_DUE_TO_GCC_BUG)
   EXPECT_EQ("c&&", TypeQuals(*CO(absl::in_place, "cxvalue")));
-#endif
   EXPECT_EQ("c&&", TypeQuals(*OC(absl::in_place, "xvalue_c")));
 
 #if !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
@@ -1117,11 +1076,9 @@
   constexpr absl::optional<double> copt_empty, copt_set = {1.2};
   static_assert(42.0 == copt_empty.value_or(42), "");
   static_assert(1.2 == copt_set.value_or(42), "");
-#ifndef ABSL_SKIP_OVERLOAD_TEST_DUE_TO_MSVC_BUG
   using COD = const absl::optional<double>;
   static_assert(42.0 == COD().value_or(42), "");
   static_assert(1.2 == COD(1.2).value_or(42), "");
-#endif
 }
 
 // make_optional cannot be constexpr until C++17
diff --git a/absl/types/span.h b/absl/types/span.h
index 88cd759..a0f8027 100644
--- a/absl/types/span.h
+++ b/absl/types/span.h
@@ -43,7 +43,7 @@
 //    * A read-only `absl::Span<const T>` can be implicitly constructed from an
 //      initializer list.
 //    * `absl::Span` has no `bytes()`, `size_bytes()`, `as_bytes()`, or
-//      `as_mutable_bytes()` methods
+//      `as_writable_bytes()` methods
 //    * `absl::Span` has no static extent template parameter, nor constructors
 //      which exist only because of the static extent parameter.
 //    * `absl::Span` has an explicit mutable-reference constructor
@@ -151,7 +151,7 @@
 //   int* my_array = new int[10];
 //   MyRoutine(absl::Span<const int>(my_array, 10));
 template <typename T>
-class Span {
+class ABSL_INTERNAL_ATTRIBUTE_VIEW Span {
  private:
   // Used to determine whether a Span can be constructed from a container of
   // type C.
@@ -185,6 +185,7 @@
   using const_reverse_iterator = std::reverse_iterator<const_iterator>;
   using size_type = size_t;
   using difference_type = ptrdiff_t;
+  using absl_internal_is_view = std::true_type;
 
   static const size_type npos = ~(size_type(0));
 
diff --git a/absl/types/span_test.cc b/absl/types/span_test.cc
index 29e8681..e24144d 100644
--- a/absl/types/span_test.cc
+++ b/absl/types/span_test.cc
@@ -31,10 +31,15 @@
 #include "absl/container/fixed_array.h"
 #include "absl/container/inlined_vector.h"
 #include "absl/hash/hash_testing.h"
+#include "absl/meta/type_traits.h"
 #include "absl/strings/str_cat.h"
 
 namespace {
 
+static_assert(!absl::type_traits_internal::IsOwner<absl::Span<int>>::value &&
+                  absl::type_traits_internal::IsView<absl::Span<int>>::value,
+              "Span is a view, not an owner");
+
 MATCHER_P(DataIs, data,
           absl::StrCat("data() ", negation ? "isn't " : "is ",
                        testing::PrintToString(data))) {
diff --git a/absl/types/variant.h b/absl/types/variant.h
index ac93464..56a7e05 100644
--- a/absl/types/variant.h
+++ b/absl/types/variant.h
@@ -303,11 +303,10 @@
 }
 
 // Overload for getting a variant's rvalue by type.
-// Note: `absl::move()` is required to allow use of constexpr in C++11.
 template <class T, class... Types>
 constexpr T&& get(variant<Types...>&& v) {
   return variant_internal::VariantCoreAccess::CheckedAccess<
-      variant_internal::IndexOf<T, Types...>::value>(absl::move(v));
+      variant_internal::IndexOf<T, Types...>::value>(std::move(v));
 }
 
 // Overload for getting a variant's const lvalue by type.
@@ -318,11 +317,10 @@
 }
 
 // Overload for getting a variant's const rvalue by type.
-// Note: `absl::move()` is required to allow use of constexpr in C++11.
 template <class T, class... Types>
 constexpr const T&& get(const variant<Types...>&& v) {
   return variant_internal::VariantCoreAccess::CheckedAccess<
-      variant_internal::IndexOf<T, Types...>::value>(absl::move(v));
+      variant_internal::IndexOf<T, Types...>::value>(std::move(v));
 }
 
 // Overload for getting a variant's lvalue by index.
@@ -333,11 +331,10 @@
 }
 
 // Overload for getting a variant's rvalue by index.
-// Note: `absl::move()` is required to allow use of constexpr in C++11.
 template <std::size_t I, class... Types>
 constexpr variant_alternative_t<I, variant<Types...>>&& get(
     variant<Types...>&& v) {
-  return variant_internal::VariantCoreAccess::CheckedAccess<I>(absl::move(v));
+  return variant_internal::VariantCoreAccess::CheckedAccess<I>(std::move(v));
 }
 
 // Overload for getting a variant's const lvalue by index.
@@ -348,11 +345,10 @@
 }
 
 // Overload for getting a variant's const rvalue by index.
-// Note: `absl::move()` is required to allow use of constexpr in C++11.
 template <std::size_t I, class... Types>
 constexpr const variant_alternative_t<I, variant<Types...>>&& get(
     const variant<Types...>&& v) {
-  return variant_internal::VariantCoreAccess::CheckedAccess<I>(absl::move(v));
+  return variant_internal::VariantCoreAccess::CheckedAccess<I>(std::move(v));
 }
 
 // get_if()
@@ -432,8 +428,8 @@
   return variant_internal::
       VisitIndices<variant_size<absl::decay_t<Variants> >::value...>::Run(
           variant_internal::PerformVisitation<Visitor, Variants...>{
-              std::forward_as_tuple(absl::forward<Variants>(vars)...),
-              absl::forward<Visitor>(vis)},
+              std::forward_as_tuple(std::forward<Variants>(vars)...),
+              std::forward<Visitor>(vis)},
           vars.index()...);
 }
 
@@ -504,13 +500,12 @@
       class T,
       std::size_t I = std::enable_if<
           variant_internal::IsNeitherSelfNorInPlace<variant,
-                                                    absl::decay_t<T>>::value,
-          variant_internal::IndexOfConstructedType<variant, T>>::type::value,
+                                                    absl::decay_t<T> >::value,
+          variant_internal::IndexOfConstructedType<variant, T> >::type::value,
       class Tj = absl::variant_alternative_t<I, variant>,
-      absl::enable_if_t<std::is_constructible<Tj, T>::value>* =
-          nullptr>
+      absl::enable_if_t<std::is_constructible<Tj, T>::value>* = nullptr>
   constexpr variant(T&& t) noexcept(std::is_nothrow_constructible<Tj, T>::value)
-      : Base(variant_internal::EmplaceTag<I>(), absl::forward<T>(t)) {}
+      : Base(variant_internal::EmplaceTag<I>(), std::forward<T>(t)) {}
 
   // Constructs a variant of an alternative type from the arguments through
   // direct-initialization.
@@ -524,7 +519,7 @@
   constexpr explicit variant(in_place_type_t<T>, Args&&... args)
       : Base(variant_internal::EmplaceTag<
                  variant_internal::UnambiguousIndexOf<variant, T>::value>(),
-             absl::forward<Args>(args)...) {}
+             std::forward<Args>(args)...) {}
 
   // Constructs a variant of an alternative type from an initializer list
   // and other arguments through direct-initialization.
@@ -539,7 +534,7 @@
                              Args&&... args)
       : Base(variant_internal::EmplaceTag<
                  variant_internal::UnambiguousIndexOf<variant, T>::value>(),
-             il, absl::forward<Args>(args)...) {}
+             il, std::forward<Args>(args)...) {}
 
   // Constructs a variant of an alternative type from a provided index,
   // through value-initialization using the provided forwarded arguments.
@@ -548,7 +543,7 @@
                 variant_internal::VariantAlternativeSfinaeT<I, variant>,
                 Args...>::value>::type* = nullptr>
   constexpr explicit variant(in_place_index_t<I>, Args&&... args)
-      : Base(variant_internal::EmplaceTag<I>(), absl::forward<Args>(args)...) {}
+      : Base(variant_internal::EmplaceTag<I>(), std::forward<Args>(args)...) {}
 
   // Constructs a variant of an alternative type from a provided index,
   // through value-initialization of an initializer list and the provided
@@ -560,7 +555,7 @@
   constexpr explicit variant(in_place_index_t<I>, std::initializer_list<U> il,
                              Args&&... args)
       : Base(variant_internal::EmplaceTag<I>(), il,
-             absl::forward<Args>(args)...) {}
+             std::forward<Args>(args)...) {}
 
   // Destructors
 
@@ -595,7 +590,7 @@
           std::is_nothrow_constructible<Tj, T>::value) {
     variant_internal::VisitIndices<sizeof...(Tn) + 1>::Run(
         variant_internal::VariantCoreAccess::MakeConversionAssignVisitor(
-            this, absl::forward<T>(t)),
+            this, std::forward<T>(t)),
         index());
 
     return *this;
@@ -623,7 +618,7 @@
   T& emplace(Args&&... args) {
     return variant_internal::VariantCoreAccess::Replace<
         variant_internal::UnambiguousIndexOf<variant, T>::value>(
-        this, absl::forward<Args>(args)...);
+        this, std::forward<Args>(args)...);
   }
 
   // Constructs a value of the given alternative type T within the variant using
@@ -644,7 +639,7 @@
   T& emplace(std::initializer_list<U> il, Args&&... args) {
     return variant_internal::VariantCoreAccess::Replace<
         variant_internal::UnambiguousIndexOf<variant, T>::value>(
-        this, il, absl::forward<Args>(args)...);
+        this, il, std::forward<Args>(args)...);
   }
 
   // Destroys the current value of the variant (provided that
@@ -663,7 +658,7 @@
                                       Args...>::value>::type* = nullptr>
   absl::variant_alternative_t<I, variant>& emplace(Args&&... args) {
     return variant_internal::VariantCoreAccess::Replace<I>(
-        this, absl::forward<Args>(args)...);
+        this, std::forward<Args>(args)...);
   }
 
   // Destroys the current value of the variant (provided that
@@ -681,7 +676,7 @@
   absl::variant_alternative_t<I, variant>& emplace(std::initializer_list<U> il,
                                                    Args&&... args) {
     return variant_internal::VariantCoreAccess::Replace<I>(
-        this, il, absl::forward<Args>(args)...);
+        this, il, std::forward<Args>(args)...);
   }
 
   // variant::valueless_by_exception()
diff --git a/absl/types/variant_test.cc b/absl/types/variant_test.cc
index 4cd5b7a..91b142c 100644
--- a/absl/types/variant_test.cc
+++ b/absl/types/variant_test.cc
@@ -389,7 +389,7 @@
   using V = variant<MoveOnly<class A>, MoveOnly<class B>, MoveOnly<class C>>;
 
   V v(in_place_index<1>, 10);
-  V v2 = absl::move(v);
+  V v2 = std::move(v);
   EXPECT_EQ(10, absl::get<1>(v2).value);
 }
 
@@ -1209,60 +1209,60 @@
     Var v(absl::in_place_index<0>, 0);
 
     using LValueGetType = decltype(absl::get<0>(v));
-    using RValueGetType = decltype(absl::get<0>(absl::move(v)));
+    using RValueGetType = decltype(absl::get<0>(std::move(v)));
 
     EXPECT_TRUE((std::is_same<LValueGetType, int&>::value));
     EXPECT_TRUE((std::is_same<RValueGetType, int&&>::value));
     EXPECT_EQ(absl::get<0>(v), 0);
-    EXPECT_EQ(absl::get<0>(absl::move(v)), 0);
+    EXPECT_EQ(absl::get<0>(std::move(v)), 0);
 
     const Var& const_v = v;
     using ConstLValueGetType = decltype(absl::get<0>(const_v));
-    using ConstRValueGetType = decltype(absl::get<0>(absl::move(const_v)));
+    using ConstRValueGetType = decltype(absl::get<0>(std::move(const_v)));
     EXPECT_TRUE((std::is_same<ConstLValueGetType, const int&>::value));
     EXPECT_TRUE((std::is_same<ConstRValueGetType, const int&&>::value));
     EXPECT_EQ(absl::get<0>(const_v), 0);
-    EXPECT_EQ(absl::get<0>(absl::move(const_v)), 0);
+    EXPECT_EQ(absl::get<0>(std::move(const_v)), 0);
   }
 
   {
     Var v = std::string("Hello");
 
     using LValueGetType = decltype(absl::get<1>(v));
-    using RValueGetType = decltype(absl::get<1>(absl::move(v)));
+    using RValueGetType = decltype(absl::get<1>(std::move(v)));
 
     EXPECT_TRUE((std::is_same<LValueGetType, std::string&>::value));
     EXPECT_TRUE((std::is_same<RValueGetType, std::string&&>::value));
     EXPECT_EQ(absl::get<1>(v), "Hello");
-    EXPECT_EQ(absl::get<1>(absl::move(v)), "Hello");
+    EXPECT_EQ(absl::get<1>(std::move(v)), "Hello");
 
     const Var& const_v = v;
     using ConstLValueGetType = decltype(absl::get<1>(const_v));
-    using ConstRValueGetType = decltype(absl::get<1>(absl::move(const_v)));
+    using ConstRValueGetType = decltype(absl::get<1>(std::move(const_v)));
     EXPECT_TRUE((std::is_same<ConstLValueGetType, const std::string&>::value));
     EXPECT_TRUE((std::is_same<ConstRValueGetType, const std::string&&>::value));
     EXPECT_EQ(absl::get<1>(const_v), "Hello");
-    EXPECT_EQ(absl::get<1>(absl::move(const_v)), "Hello");
+    EXPECT_EQ(absl::get<1>(std::move(const_v)), "Hello");
   }
 
   {
     Var v = 2.0;
 
     using LValueGetType = decltype(absl::get<2>(v));
-    using RValueGetType = decltype(absl::get<2>(absl::move(v)));
+    using RValueGetType = decltype(absl::get<2>(std::move(v)));
 
     EXPECT_TRUE((std::is_same<LValueGetType, double&>::value));
     EXPECT_TRUE((std::is_same<RValueGetType, double&&>::value));
     EXPECT_EQ(absl::get<2>(v), 2.);
-    EXPECT_EQ(absl::get<2>(absl::move(v)), 2.);
+    EXPECT_EQ(absl::get<2>(std::move(v)), 2.);
 
     const Var& const_v = v;
     using ConstLValueGetType = decltype(absl::get<2>(const_v));
-    using ConstRValueGetType = decltype(absl::get<2>(absl::move(const_v)));
+    using ConstRValueGetType = decltype(absl::get<2>(std::move(const_v)));
     EXPECT_TRUE((std::is_same<ConstLValueGetType, const double&>::value));
     EXPECT_TRUE((std::is_same<ConstRValueGetType, const double&&>::value));
     EXPECT_EQ(absl::get<2>(const_v), 2.);
-    EXPECT_EQ(absl::get<2>(absl::move(const_v)), 2.);
+    EXPECT_EQ(absl::get<2>(std::move(const_v)), 2.);
   }
 
   {
@@ -1270,20 +1270,20 @@
     v.emplace<3>(1);
 
     using LValueGetType = decltype(absl::get<3>(v));
-    using RValueGetType = decltype(absl::get<3>(absl::move(v)));
+    using RValueGetType = decltype(absl::get<3>(std::move(v)));
 
     EXPECT_TRUE((std::is_same<LValueGetType, int&>::value));
     EXPECT_TRUE((std::is_same<RValueGetType, int&&>::value));
     EXPECT_EQ(absl::get<3>(v), 1);
-    EXPECT_EQ(absl::get<3>(absl::move(v)), 1);
+    EXPECT_EQ(absl::get<3>(std::move(v)), 1);
 
     const Var& const_v = v;
     using ConstLValueGetType = decltype(absl::get<3>(const_v));
-    using ConstRValueGetType = decltype(absl::get<3>(absl::move(const_v)));
+    using ConstRValueGetType = decltype(absl::get<3>(std::move(const_v)));
     EXPECT_TRUE((std::is_same<ConstLValueGetType, const int&>::value));
     EXPECT_TRUE((std::is_same<ConstRValueGetType, const int&&>::value));
     EXPECT_EQ(absl::get<3>(const_v), 1);
-    EXPECT_EQ(absl::get<3>(absl::move(const_v)), 1);  // NOLINT
+    EXPECT_EQ(absl::get<3>(std::move(const_v)), 1);  // NOLINT
   }
 }
 
@@ -1322,60 +1322,60 @@
     Var v = 1;
 
     using LValueGetType = decltype(absl::get<int>(v));
-    using RValueGetType = decltype(absl::get<int>(absl::move(v)));
+    using RValueGetType = decltype(absl::get<int>(std::move(v)));
 
     EXPECT_TRUE((std::is_same<LValueGetType, int&>::value));
     EXPECT_TRUE((std::is_same<RValueGetType, int&&>::value));
     EXPECT_EQ(absl::get<int>(v), 1);
-    EXPECT_EQ(absl::get<int>(absl::move(v)), 1);
+    EXPECT_EQ(absl::get<int>(std::move(v)), 1);
 
     const Var& const_v = v;
     using ConstLValueGetType = decltype(absl::get<int>(const_v));
-    using ConstRValueGetType = decltype(absl::get<int>(absl::move(const_v)));
+    using ConstRValueGetType = decltype(absl::get<int>(std::move(const_v)));
     EXPECT_TRUE((std::is_same<ConstLValueGetType, const int&>::value));
     EXPECT_TRUE((std::is_same<ConstRValueGetType, const int&&>::value));
     EXPECT_EQ(absl::get<int>(const_v), 1);
-    EXPECT_EQ(absl::get<int>(absl::move(const_v)), 1);
+    EXPECT_EQ(absl::get<int>(std::move(const_v)), 1);
   }
 
   {
     Var v = std::string("Hello");
 
     using LValueGetType = decltype(absl::get<1>(v));
-    using RValueGetType = decltype(absl::get<1>(absl::move(v)));
+    using RValueGetType = decltype(absl::get<1>(std::move(v)));
 
     EXPECT_TRUE((std::is_same<LValueGetType, std::string&>::value));
     EXPECT_TRUE((std::is_same<RValueGetType, std::string&&>::value));
     EXPECT_EQ(absl::get<std::string>(v), "Hello");
-    EXPECT_EQ(absl::get<std::string>(absl::move(v)), "Hello");
+    EXPECT_EQ(absl::get<std::string>(std::move(v)), "Hello");
 
     const Var& const_v = v;
     using ConstLValueGetType = decltype(absl::get<1>(const_v));
-    using ConstRValueGetType = decltype(absl::get<1>(absl::move(const_v)));
+    using ConstRValueGetType = decltype(absl::get<1>(std::move(const_v)));
     EXPECT_TRUE((std::is_same<ConstLValueGetType, const std::string&>::value));
     EXPECT_TRUE((std::is_same<ConstRValueGetType, const std::string&&>::value));
     EXPECT_EQ(absl::get<std::string>(const_v), "Hello");
-    EXPECT_EQ(absl::get<std::string>(absl::move(const_v)), "Hello");
+    EXPECT_EQ(absl::get<std::string>(std::move(const_v)), "Hello");
   }
 
   {
     Var v = 2.0;
 
     using LValueGetType = decltype(absl::get<2>(v));
-    using RValueGetType = decltype(absl::get<2>(absl::move(v)));
+    using RValueGetType = decltype(absl::get<2>(std::move(v)));
 
     EXPECT_TRUE((std::is_same<LValueGetType, double&>::value));
     EXPECT_TRUE((std::is_same<RValueGetType, double&&>::value));
     EXPECT_EQ(absl::get<double>(v), 2.);
-    EXPECT_EQ(absl::get<double>(absl::move(v)), 2.);
+    EXPECT_EQ(absl::get<double>(std::move(v)), 2.);
 
     const Var& const_v = v;
     using ConstLValueGetType = decltype(absl::get<2>(const_v));
-    using ConstRValueGetType = decltype(absl::get<2>(absl::move(const_v)));
+    using ConstRValueGetType = decltype(absl::get<2>(std::move(const_v)));
     EXPECT_TRUE((std::is_same<ConstLValueGetType, const double&>::value));
     EXPECT_TRUE((std::is_same<ConstRValueGetType, const double&&>::value));
     EXPECT_EQ(absl::get<double>(const_v), 2.);
-    EXPECT_EQ(absl::get<double>(absl::move(const_v)), 2.);
+    EXPECT_EQ(absl::get<double>(std::move(const_v)), 2.);
   }
 }
 
@@ -1825,13 +1825,13 @@
     int operator()(std::string&&, std::string&&) const { return 3; }  // NOLINT
   };
   EXPECT_FALSE(absl::visit(Visitor{}, v));
-  EXPECT_TRUE(absl::visit(Visitor{}, absl::move(v)));
+  EXPECT_TRUE(absl::visit(Visitor{}, std::move(v)));
 
   // Also test the variadic overload.
   EXPECT_EQ(0, absl::visit(Visitor{}, v, v));
-  EXPECT_EQ(1, absl::visit(Visitor{}, v, absl::move(v)));
-  EXPECT_EQ(2, absl::visit(Visitor{}, absl::move(v), v));
-  EXPECT_EQ(3, absl::visit(Visitor{}, absl::move(v), absl::move(v)));
+  EXPECT_EQ(1, absl::visit(Visitor{}, v, std::move(v)));
+  EXPECT_EQ(2, absl::visit(Visitor{}, std::move(v), v));
+  EXPECT_EQ(3, absl::visit(Visitor{}, std::move(v), std::move(v)));
 }
 
 TEST(VariantTest, VisitRValueVisitor) {
@@ -1862,12 +1862,12 @@
       (std::is_same<LValue_LValue, decltype(absl::visit(visitor, v))>::value));
   EXPECT_TRUE(
       (std::is_same<RValue_LValue,
-                    decltype(absl::visit(visitor, absl::move(v)))>::value));
+                    decltype(absl::visit(visitor, std::move(v)))>::value));
   EXPECT_TRUE((
       std::is_same<LValue_RValue, decltype(absl::visit(Visitor{}, v))>::value));
   EXPECT_TRUE(
       (std::is_same<RValue_RValue,
-                    decltype(absl::visit(Visitor{}, absl::move(v)))>::value));
+                    decltype(absl::visit(Visitor{}, std::move(v)))>::value));
 }
 
 TEST(VariantTest, VisitVariadic) {
@@ -2225,7 +2225,7 @@
   EXPECT_TRUE(absl::holds_alternative<std::unique_ptr<int>>(v));
 
   // Construct a variant by moving from another variant.
-  Variant v2(absl::move(v));
+  Variant v2(std::move(v));
   ASSERT_TRUE(absl::holds_alternative<std::unique_ptr<int>>(v2));
   ASSERT_NE(nullptr, absl::get<std::unique_ptr<int>>(v2));
   EXPECT_EQ(10, *absl::get<std::unique_ptr<int>>(v2));
@@ -2242,7 +2242,7 @@
   EXPECT_EQ("foo", *absl::get<std::unique_ptr<std::string>>(v));
 
   // Move-assign a variant.
-  v2 = absl::move(v);
+  v2 = std::move(v);
   ASSERT_TRUE(absl::holds_alternative<std::unique_ptr<std::string>>(v2));
   EXPECT_EQ("foo", *absl::get<std::unique_ptr<std::string>>(v2));
   EXPECT_TRUE(absl::holds_alternative<std::unique_ptr<std::string>>(v));
@@ -2568,7 +2568,7 @@
   vec.push_back(absl::make_unique<int>(42));
   vec.emplace_back("Hello");
   vec.reserve(3);
-  auto another_vec = absl::move(vec);
+  auto another_vec = std::move(vec);
   // As a sanity check, verify vector contents.
   ASSERT_EQ(2u, another_vec.size());
   EXPECT_EQ(42, *absl::get<std::unique_ptr<int>>(another_vec[0]));
diff --git a/absl/utility/utility.h b/absl/utility/utility.h
index fc0d1f6..ebbb49b 100644
--- a/absl/utility/utility.h
+++ b/absl/utility/utility.h
@@ -51,11 +51,14 @@
 // abstractions for platforms that had not yet provided them. Those
 // platforms are no longer supported. New code should simply use the
 // ones from std directly.
+using std::exchange;
+using std::forward;
 using std::index_sequence;
 using std::index_sequence_for;
 using std::integer_sequence;
 using std::make_index_sequence;
 using std::make_integer_sequence;
+using std::move;
 
 namespace utility_internal {
 
@@ -129,27 +132,6 @@
 void in_place_index(utility_internal::InPlaceIndexTag<I>) {}
 #endif  // ABSL_USES_STD_VARIANT
 
-// Constexpr move and forward
-
-// move()
-//
-// A constexpr version of `std::move()`, designed to be a drop-in replacement
-// for C++14's `std::move()`.
-template <typename T>
-constexpr absl::remove_reference_t<T>&& move(T&& t) noexcept {
-  return static_cast<absl::remove_reference_t<T>&&>(t);
-}
-
-// forward()
-//
-// A constexpr version of `std::forward()`, designed to be a drop-in replacement
-// for C++14's `std::forward()`.
-template <typename T>
-constexpr T&& forward(
-    absl::remove_reference_t<T>& t) noexcept {  // NOLINT(runtime/references)
-  return static_cast<T&&>(t);
-}
-
 namespace utility_internal {
 // Helper method for expanding tuple into a called method.
 template <typename Functor, typename Tuple, std::size_t... Indexes>
@@ -215,26 +197,6 @@
           typename std::remove_reference<Tuple>::type>::value>{});
 }
 
-// exchange
-//
-// Replaces the value of `obj` with `new_value` and returns the old value of
-// `obj`.  `absl::exchange` is designed to be a drop-in replacement for C++14's
-// `std::exchange`.
-//
-// Example:
-//
-//   Foo& operator=(Foo&& other) {
-//     ptr1_ = absl::exchange(other.ptr1_, nullptr);
-//     int1_ = absl::exchange(other.int1_, -1);
-//     return *this;
-//   }
-template <typename T, typename U = T>
-T exchange(T& obj, U&& new_value) {
-  T old_value = absl::move(obj);
-  obj = absl::forward<U>(new_value);
-  return old_value;
-}
-
 namespace utility_internal {
 template <typename T, typename Tuple, size_t... I>
 T make_from_tuple_impl(Tuple&& tup, absl::index_sequence<I...>) {
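With the constexpr shims deleted, the using-declarations added above keep existing absl::move, absl::forward, and absl::exchange call sites compiling as aliases for the std versions. A minimal sketch of the migration, reusing the move-assignment pattern from the removed exchange comment (Foo, ptr_, and val_ are illustrative names, not Abseil APIs):

    #include <utility>

    struct Foo {
      int* ptr_ = nullptr;
      int val_ = -1;
      Foo& operator=(Foo&& other) noexcept {
        ptr_ = std::exchange(other.ptr_, nullptr);  // was absl::exchange
        val_ = std::exchange(other.val_, -1);       // either spelling still compiles
        return *this;
      }
    };
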
diff --git a/absl/utility/utility_test.cc b/absl/utility/utility_test.cc
index 1af6813..c540b22 100644
--- a/absl/utility/utility_test.cc
+++ b/absl/utility/utility_test.cc
@@ -205,14 +205,6 @@
   EXPECT_EQ(42, absl::apply(&FlipFlop::member, std::make_tuple(obj)));
 }
 
-TEST(ExchangeTest, MoveOnly) {
-  auto a = Factory(1);
-  EXPECT_EQ(1, *a);
-  auto b = absl::exchange(a, Factory(2));
-  EXPECT_EQ(2, *a);
-  EXPECT_EQ(1, *b);
-}
-
 TEST(MakeFromTupleTest, String) {
   EXPECT_EQ(
       absl::make_from_tuple<std::string>(std::make_tuple("hello world", 5)),
diff --git a/ci/cmake_common.sh b/ci/cmake_common.sh
index 784b381..c8a5b85 100644
--- a/ci/cmake_common.sh
+++ b/ci/cmake_common.sh
@@ -14,16 +14,6 @@
 
 # The version of GoogleTest to be used in the CMake tests in this directory.
 # Keep this in sync with the version in the WORKSPACE file.
-# TODO(dmauro): After the next GoogleTest release, use the stable file required
-# by Bzlmod.  This means downloading a copy of the file and reuploading it to
-# avoid changing checksums if the compression is changed by GitHub.  It also
-# means stop referring to it as a commit and instead use the uploaded filename.
-readonly ABSL_GOOGLETEST_COMMIT="f8d7d77c06936315286eb55f8de22cd23c188571"
+readonly ABSL_GOOGLETEST_VERSION="1.15.2"
 
-# Avoid depending on GitHub by looking for a cached copy of the commit first.
-if [[ -r "${KOKORO_GFILE_DIR:-}/distdir/${ABSL_GOOGLETEST_COMMIT}.zip" ]]; then
-  DOCKER_EXTRA_ARGS="--mount type=bind,source=${KOKORO_GFILE_DIR}/distdir,target=/distdir,readonly ${DOCKER_EXTRA_ARGS:-}"
-  ABSL_GOOGLETEST_DOWNLOAD_URL="file:///distdir/${ABSL_GOOGLETEST_COMMIT}.zip"
-else
-  ABSL_GOOGLETEST_DOWNLOAD_URL="https://github.com/google/googletest/archive/${ABSL_GOOGLETEST_COMMIT}.zip"
-fi
+readonly ABSL_GOOGLETEST_DOWNLOAD_URL="https://github.com/google/googletest/releases/download/v${ABSL_GOOGLETEST_VERSION}/googletest-${ABSL_GOOGLETEST_VERSION}.tar.gz"
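For context, the pinned URL is consumed by the CMake CI scripts below through the ABSL_GOOGLETEST_DOWNLOAD_URL cache variable; a rough, standalone sketch (the source path is illustrative):

    source ci/cmake_common.sh
    cmake /path/to/abseil-cpp \
      -DABSL_BUILD_TESTING=ON \
      -DABSL_GOOGLETEST_DOWNLOAD_URL="${ABSL_GOOGLETEST_DOWNLOAD_URL}"
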
diff --git a/ci/cmake_install_test.sh b/ci/cmake_install_test.sh
index ab3b86f..871490f 100755
--- a/ci/cmake_install_test.sh
+++ b/ci/cmake_install_test.sh
@@ -48,7 +48,7 @@
     --tmpfs=/abseil-cpp:exec \
     --workdir=/abseil-cpp \
     --cap-add=SYS_PTRACE \
-    -e "ABSL_GOOGLETEST_COMMIT=${ABSL_GOOGLETEST_COMMIT}" \
+    -e "ABSL_GOOGLETEST_VERSION=${ABSL_GOOGLETEST_VERSION}" \
     -e "ABSL_GOOGLETEST_DOWNLOAD_URL=${ABSL_GOOGLETEST_DOWNLOAD_URL}" \
     -e "LINK_TYPE=${link_type}" \
     --rm \
diff --git a/ci/linux_docker_containers.sh b/ci/linux_docker_containers.sh
index 232233d..fefef92 100644
--- a/ci/linux_docker_containers.sh
+++ b/ci/linux_docker_containers.sh
@@ -16,7 +16,7 @@
 # Test scripts should source this file to get the identifiers.
 
 readonly LINUX_ALPINE_CONTAINER="gcr.io/google.com/absl-177019/alpine:20230612"
-readonly LINUX_CLANG_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_hybrid-latest:20231218"
+readonly LINUX_CLANG_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_hybrid-latest:20240523"
 readonly LINUX_ARM_CLANG_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_arm_hybrid-latest:20231219"
-readonly LINUX_GCC_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_hybrid-latest:20231218"
-readonly LINUX_GCC_FLOOR_CONTAINER="gcr.io/google.com/absl-177019/linux_gcc-floor:20230120"
+readonly LINUX_GCC_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_hybrid-latest:20240523"
+readonly LINUX_GCC_FLOOR_CONTAINER="gcr.io/google.com/absl-177019/linux_gcc-floor:20240717"
diff --git a/ci/linux_gcc-floor_libstdcxx_bazel.sh b/ci/linux_gcc-floor_libstdcxx_bazel.sh
index 5bd1dbf..b2d8c1d 100755
--- a/ci/linux_gcc-floor_libstdcxx_bazel.sh
+++ b/ci/linux_gcc-floor_libstdcxx_bazel.sh
@@ -59,9 +59,6 @@
   BAZEL_EXTRA_ARGS="--distdir=/distdir ${BAZEL_EXTRA_ARGS:-}"
 fi
 
-# TODO(absl-team): This currently uses Bazel 5. When upgrading to a version
-# of Bazel that supports Bzlmod, add --enable_bzlmod=false to keep test
-# coverage for the old WORKSPACE dependency management.
 for std in ${STD}; do
   for compilation_mode in ${COMPILATION_MODE}; do
     for exceptions_mode in ${EXCEPTIONS_MODE}; do
@@ -82,6 +79,7 @@
           --copt=-Werror \
           --define="absl=1" \
           --distdir="/bazel-distdir" \
+          --enable_bzlmod=false \
           --features=external_include_paths \
           --keep_going \
           --show_timestamps \
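With the TODO above resolved, the new flag pins this job to the legacy WORKSPACE dependency setup rather than Bzlmod (MODULE.bazel); stripped of the other options, the effect is roughly:

    # Keep resolving external deps from WORKSPACE to preserve coverage for the
    # old dependency-management path.
    bazel test ... --enable_bzlmod=false
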
diff --git a/ci/linux_gcc-latest_libstdcxx_cmake.sh b/ci/linux_gcc-latest_libstdcxx_cmake.sh
index 1f72123..243901c 100755
--- a/ci/linux_gcc-latest_libstdcxx_cmake.sh
+++ b/ci/linux_gcc-latest_libstdcxx_cmake.sh
@@ -34,33 +34,46 @@
   ABSL_CMAKE_BUILD_SHARED="OFF ON"
 fi
 
+if [[ -z ${ABSL_CMAKE_BUILD_MONOLITHIC_SHARED_LIBS:-} ]]; then
+  ABSL_CMAKE_BUILD_MONOLITHIC_SHARED_LIBS="OFF ON"
+fi
+
 source "${ABSEIL_ROOT}/ci/linux_docker_containers.sh"
 readonly DOCKER_CONTAINER=${LINUX_GCC_LATEST_CONTAINER}
 
 for std in ${ABSL_CMAKE_CXX_STANDARDS}; do
   for compilation_mode in ${ABSL_CMAKE_BUILD_TYPES}; do
     for build_shared in ${ABSL_CMAKE_BUILD_SHARED}; do
-      time docker run \
-        --mount type=bind,source="${ABSEIL_ROOT}",target=/abseil-cpp,readonly \
-        --tmpfs=/buildfs:exec \
-        --workdir=/buildfs \
-        --cap-add=SYS_PTRACE \
-        --rm \
-        -e CFLAGS="-Werror" \
-        -e CXXFLAGS="-Werror" \
-        ${DOCKER_EXTRA_ARGS:-} \
-        "${DOCKER_CONTAINER}" \
-        /bin/bash -c "
-          cmake /abseil-cpp \
-            -DABSL_GOOGLETEST_DOWNLOAD_URL=${ABSL_GOOGLETEST_DOWNLOAD_URL} \
-            -DBUILD_SHARED_LIBS=${build_shared} \
-            -DABSL_BUILD_TESTING=ON \
-            -DCMAKE_BUILD_TYPE=${compilation_mode} \
-            -DCMAKE_CXX_STANDARD=${std} \
-            -DCMAKE_MODULE_LINKER_FLAGS=\"-Wl,--no-undefined\" && \
-          make -j$(nproc) && \
-          TZDIR=/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo \
-          ctest -j$(nproc) --output-on-failure"
+      if [[ $build_shared == "OFF" ]]; then
+        monolithic_shared_options="OFF"
+      else
+        monolithic_shared_options="$ABSL_CMAKE_BUILD_MONOLITHIC_SHARED_LIBS"
+      fi
+
+      for monolithic_shared in $monolithic_shared_options; do
+        time docker run \
+          --mount type=bind,source="${ABSEIL_ROOT}",target=/abseil-cpp,readonly \
+          --tmpfs=/buildfs:exec \
+          --workdir=/buildfs \
+          --cap-add=SYS_PTRACE \
+          --rm \
+          -e CFLAGS="-Werror" \
+          -e CXXFLAGS="-Werror" \
+          ${DOCKER_EXTRA_ARGS:-} \
+          "${DOCKER_CONTAINER}" \
+          /bin/bash -c "
+            cmake /abseil-cpp \
+              -DABSL_GOOGLETEST_DOWNLOAD_URL=${ABSL_GOOGLETEST_DOWNLOAD_URL} \
+              -DBUILD_SHARED_LIBS=${build_shared} \
+              -DABSL_BUILD_TESTING=ON \
+              -DCMAKE_BUILD_TYPE=${compilation_mode} \
+              -DCMAKE_CXX_STANDARD=${std} \
+              -DABSL_BUILD_MONOLITHIC_SHARED_LIBS=${monolithic_shared} \
+              -DCMAKE_MODULE_LINKER_FLAGS=\"-Wl,--no-undefined\" && \
+            make -j$(nproc) && \
+            TZDIR=/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo \
+            ctest -j$(nproc) --output-on-failure"
+      done
     done
   done
 done
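The guard above skips the monolithic option for static builds, so (assuming ABSL_BUILD_MONOLITHIC_SHARED_LIBS collapses the shared build into a single library, as the name suggests) each standard/build-type pair now runs in three library configurations:

    BUILD_SHARED_LIBS=OFF  ABSL_BUILD_MONOLITHIC_SHARED_LIBS=OFF   # static libraries
    BUILD_SHARED_LIBS=ON   ABSL_BUILD_MONOLITHIC_SHARED_LIBS=OFF   # per-target shared libraries
    BUILD_SHARED_LIBS=ON   ABSL_BUILD_MONOLITHIC_SHARED_LIBS=ON    # single monolithic shared library
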
diff --git a/ci/linux_gcc_alpine_cmake.sh b/ci/linux_gcc_alpine_cmake.sh
index b784456..f19954f 100755
--- a/ci/linux_gcc_alpine_cmake.sh
+++ b/ci/linux_gcc_alpine_cmake.sh
@@ -34,32 +34,46 @@
   ABSL_CMAKE_BUILD_SHARED="OFF ON"
 fi
 
+if [[ -z ${ABSL_CMAKE_BUILD_MONOLITHIC_SHARED_LIBS:-} ]]; then
+  ABSL_CMAKE_BUILD_MONOLITHIC_SHARED_LIBS="OFF ON"
+fi
+
 source "${ABSEIL_ROOT}/ci/linux_docker_containers.sh"
 readonly DOCKER_CONTAINER=${LINUX_ALPINE_CONTAINER}
 
 for std in ${ABSL_CMAKE_CXX_STANDARDS}; do
   for compilation_mode in ${ABSL_CMAKE_BUILD_TYPES}; do
     for build_shared in ${ABSL_CMAKE_BUILD_SHARED}; do
-      time docker run \
-        --mount type=bind,source="${ABSEIL_ROOT}",target=/abseil-cpp,readonly \
-        --tmpfs=/buildfs:exec \
-        --workdir=/buildfs \
-        --cap-add=SYS_PTRACE \
-        --rm \
-        -e CFLAGS="-Werror" \
-        -e CXXFLAGS="-Werror" \
-        ${DOCKER_EXTRA_ARGS:-} \
-        "${DOCKER_CONTAINER}" \
-        /bin/sh -c "
-          cmake /abseil-cpp \
-            -DABSL_GOOGLETEST_DOWNLOAD_URL=${ABSL_GOOGLETEST_DOWNLOAD_URL} \
-            -DABSL_BUILD_TESTING=ON \
-            -DCMAKE_BUILD_TYPE=${compilation_mode} \
-            -DCMAKE_CXX_STANDARD=${std} \
-            -DCMAKE_MODULE_LINKER_FLAGS=\"-Wl,--no-undefined\" && \
-          make -j$(nproc) && \
-          TZDIR=/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo \
-          ctest -j$(nproc) --output-on-failure"
+      if [[ $build_shared == "OFF" ]]; then
+        monolithic_shared_options="OFF"
+      else
+        monolithic_shared_options="$ABSL_CMAKE_BUILD_MONOLITHIC_SHARED_LIBS"
+      fi
+
+      for monolithic_shared in $monolithic_shared_options; do
+        time docker run \
+          --mount type=bind,source="${ABSEIL_ROOT}",target=/abseil-cpp,readonly \
+          --tmpfs=/buildfs:exec \
+          --workdir=/buildfs \
+          --cap-add=SYS_PTRACE \
+          --rm \
+          -e CFLAGS="-Werror" \
+          -e CXXFLAGS="-Werror" \
+          ${DOCKER_EXTRA_ARGS:-} \
+          "${DOCKER_CONTAINER}" \
+          /bin/sh -c "
+            cmake /abseil-cpp \
+              -DABSL_GOOGLETEST_DOWNLOAD_URL=${ABSL_GOOGLETEST_DOWNLOAD_URL} \
+              -DBUILD_SHARED_LIBS=${build_shared} \
+              -DABSL_BUILD_TESTING=ON \
+              -DCMAKE_BUILD_TYPE=${compilation_mode} \
+              -DCMAKE_CXX_STANDARD=${std} \
+              -DABSL_BUILD_MONOLITHIC_SHARED_LIBS=${monolithic_shared} \
+              -DCMAKE_MODULE_LINKER_FLAGS=\"-Wl,--no-undefined\" && \
+            make -j$(nproc) && \
+            TZDIR=/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo \
+            ctest -j$(nproc) --output-on-failure"
+      done
     done
   done
 done
diff --git a/ci/macos_xcode_cmake.sh b/ci/macos_xcode_cmake.sh
index 690f86b..eba2fb5 100755
--- a/ci/macos_xcode_cmake.sh
+++ b/ci/macos_xcode_cmake.sh
@@ -23,11 +23,6 @@
 
 source "${ABSEIL_ROOT}/ci/cmake_common.sh"
 
-# The MacOS build doesn't run in a docker container, so we have to override ABSL_GOOGLETEST_DOWNLOAD_URL.
-if [[ -r "${KOKORO_GFILE_DIR}/distdir/${ABSL_GOOGLETEST_COMMIT}.zip" ]]; then
-  ABSL_GOOGLETEST_DOWNLOAD_URL="file://${KOKORO_GFILE_DIR}/distdir/${ABSL_GOOGLETEST_COMMIT}.zip"
-fi
-
 if [[ -z ${ABSL_CMAKE_BUILD_TYPES:-} ]]; then
   ABSL_CMAKE_BUILD_TYPES="Debug"
 fi
@@ -36,22 +31,35 @@
   ABSL_CMAKE_BUILD_SHARED="OFF ON"
 fi
 
+if [[ -z ${ABSL_CMAKE_BUILD_MONOLITHIC_SHARED_LIBS:-} ]]; then
+  ABSL_CMAKE_BUILD_MONOLITHIC_SHARED_LIBS="OFF ON"
+fi
+
 for compilation_mode in ${ABSL_CMAKE_BUILD_TYPES}; do
   for build_shared in ${ABSL_CMAKE_BUILD_SHARED}; do
-    BUILD_DIR=$(mktemp -d ${compilation_mode}.XXXXXXXX)
-    cd ${BUILD_DIR}
+    if [[ $build_shared == "OFF" ]]; then
+      monolithic_shared_options="OFF"
+    else
+      monolithic_shared_options="$ABSL_CMAKE_BUILD_MONOLITHIC_SHARED_LIBS"
+    fi
 
-    # TODO(absl-team): Enable -Werror once all warnings are fixed.
-    time cmake ${ABSEIL_ROOT} \
-      -GXcode \
-      -DBUILD_SHARED_LIBS=${build_shared} \
-      -DABSL_BUILD_TESTING=ON \
-      -DCMAKE_BUILD_TYPE=${compilation_mode} \
-      -DCMAKE_CXX_STANDARD=14 \
-      -DCMAKE_MODULE_LINKER_FLAGS="-Wl,--no-undefined" \
-      -DABSL_GOOGLETEST_DOWNLOAD_URL="${ABSL_GOOGLETEST_DOWNLOAD_URL}"
-    time cmake --build .
-    time TZDIR=${ABSEIL_ROOT}/absl/time/internal/cctz/testdata/zoneinfo \
-      ctest -C ${compilation_mode} --output-on-failure
+    for monolithic_shared in $monolithic_shared_options; do
+      BUILD_DIR=$(mktemp -d ${compilation_mode}.XXXXXXXX)
+      cd ${BUILD_DIR}
+
+      # TODO(absl-team): Enable -Werror once all warnings are fixed.
+      time cmake ${ABSEIL_ROOT} \
+        -GXcode \
+        -DBUILD_SHARED_LIBS=${build_shared} \
+        -DABSL_BUILD_TESTING=ON \
+        -DCMAKE_BUILD_TYPE=${compilation_mode} \
+        -DCMAKE_CXX_STANDARD=14 \
+        -DCMAKE_MODULE_LINKER_FLAGS="-Wl,--no-undefined" \
+        -DABSL_BUILD_MONOLITHIC_SHARED_LIBS=${monolithic_shared} \
+        -DABSL_GOOGLETEST_DOWNLOAD_URL="${ABSL_GOOGLETEST_DOWNLOAD_URL}"
+      time cmake --build .
+      time TZDIR=${ABSEIL_ROOT}/absl/time/internal/cctz/testdata/zoneinfo \
+        ctest -C ${compilation_mode} --output-on-failure
+    done
   done
 done
diff --git a/ci/windows_clangcl_bazel.bat b/ci/windows_clangcl_bazel.bat
index 5162628..b031c30 100755
--- a/ci/windows_clangcl_bazel.bat
+++ b/ci/windows_clangcl_bazel.bat
@@ -43,13 +43,13 @@
   test ... ^
   --compilation_mode=%COMPILATION_MODE% ^
   --compiler=clang-cl ^
-  --copt=/std:%STD% ^
   --copt=/WX ^
   --copt=-Wno-microsoft-cast ^
+  --cxxopt=/std:%STD% ^
   --define=absl=1 ^
   --distdir=%KOKORO_GFILE_DIR%\distdir ^
   --enable_bzlmod=true ^
-  --extra_execution_platforms=//absl:x64_windows-clang-cl ^
+  --extra_execution_platforms=//:x64_windows-clang-cl ^
   --extra_toolchains=@local_config_cc//:cc-toolchain-x64_windows-clang-cl ^
   --keep_going ^
   --test_env="GTEST_INSTALL_FAILURE_SIGNAL_HANDLER=1" ^
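Because /std: is a C++-only option, it is now passed through --cxxopt (applied to C++ compiles only) instead of --copt (applied to both C and C++ compiles); a trimmed sketch of the resulting invocation, with c++17 standing in for %STD%:

    bazel test ... ^
      --compiler=clang-cl ^
      --cxxopt=/std:c++17 ^
      --copt=/WX ^
      --extra_execution_platforms=//:x64_windows-clang-cl
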
diff --git a/ci/windows_msvc_cmake.bat b/ci/windows_msvc_cmake.bat
index c0f1ac9..c9aee78 100755
--- a/ci/windows_msvc_cmake.bat
+++ b/ci/windows_msvc_cmake.bat
@@ -14,19 +14,10 @@
 
 SETLOCAL ENABLEDELAYEDEXPANSION
 
-:: The commit of GoogleTest to be used in the CMake tests in this directory.
-:: Keep this in sync with the commit in the WORKSPACE file.
-:: TODO(dmauro): After the next GoogleTest release, use the stable file required
-:: by Bzlmod.  This means downloading a copy of the file and reuploading it to
-:: avoid changing checksums if the compression is changed by GitHub.  It also
-:: means stop referring to it as a commit and instead use the uploaded filename.
-SET ABSL_GOOGLETEST_COMMIT=f8d7d77c06936315286eb55f8de22cd23c188571
-
-IF EXIST %KOKORO_GFILE_DIR%\distdir\%ABSL_GOOGLETEST_COMMIT%.zip (
-  SET ABSL_GOOGLETEST_DOWNLOAD_URL=file://%KOKORO_GFILE_DIR%\distdir\%ABSL_GOOGLETEST_COMMIT%.zip
-) ELSE (
-  SET ABSL_GOOGLETEST_DOWNLOAD_URL=https://github.com/google/googletest/archive/%ABSL_GOOGLETEST_COMMIT%.zip
-)
+:: The version of GoogleTest to be used in the CMake tests in this directory.
+:: Keep this in sync with the version in the WORKSPACE file.
+SET ABSL_GOOGLETEST_VERSION=1.15.2
+SET ABSL_GOOGLETEST_DOWNLOAD_URL=https://github.com/google/googletest/releases/download/v%ABSL_GOOGLETEST_VERSION%/googletest-%ABSL_GOOGLETEST_VERSION%.tar.gz
 
 :: Replace '\' with '/' in Windows paths for CMake.
 :: Note that this cannot go inside an IF block, because BAT files are weird.
@@ -56,7 +47,6 @@
 SET CXXFLAGS="/WX"
 
 %CMAKE_BIN% ^
-  -DABSL_BUILD_TEST_HELPERS=ON ^
   -DABSL_BUILD_TESTING=ON ^
   -DABSL_GOOGLETEST_DOWNLOAD_URL=%ABSL_GOOGLETEST_DOWNLOAD_URL% ^
   -DBUILD_SHARED_LIBS=%ABSL_CMAKE_BUILD_SHARED% ^
diff --git a/create_lts.py b/create_lts.py
index 7e5368e..c20577a 100755
--- a/create_lts.py
+++ b/create_lts.py
@@ -116,20 +116,21 @@
                   datestamp)
       })
   ReplaceStringsInFile(
-      'CMakeLists.txt', {
-          'project(absl LANGUAGES CXX)':
+      'CMakeLists.txt',
+      {
+          'project(absl LANGUAGES CXX)': (
               'project(absl LANGUAGES CXX VERSION {})'.format(datestamp)
-      })
-  # Set the SOVERSION to YYMM.0.0 - The first 0 means we only have ABI
-  # compatible changes, and the second 0 means we can increment it to
-  # mark changes as ABI-compatible, for patch releases.  Note that we
-  # only use the last two digits of the year and the month because the
-  # MacOS linker requires the first part of the SOVERSION to fit into
-  # 16 bits.
-  # https://www.sicpers.info/2013/03/how-to-version-a-mach-o-library/
-  ReplaceStringsInFile(
-      'CMake/AbseilHelpers.cmake',
-      {'SOVERSION 0': 'SOVERSION "{}.0.0"'.format(datestamp[2:6])})
+          ),
+          # Set the SOVERSION to YYMM.0.0 - The first 0 means we only have ABI
+          # compatible changes, and the second 0 means we can increment it to
+          # mark changes as ABI-compatible, for patch releases.  Note that we
+          # only use the last two digits of the year and the month because the
+          # MacOS linker requires the first part of the SOVERSION to fit into
+          # 16 bits.
+          # https://www.sicpers.info/2013/03/how-to-version-a-mach-o-library/
+          'ABSL_SOVERSION 0': 'ABSL_SOVERSION "{}.0.0"'.format(datestamp[2:6]),
+      },
+  )
   StripContentBetweenTags('CMakeLists.txt', '# absl:lts-remove-begin',
                           '# absl:lts-remove-end')
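
To make the SOVERSION comment above concrete, a short Python sketch (the datestamp is an illustrative LTS value):

    datestamp = "20240722"                         # YYYYMMDD LTS datestamp
    soversion = "{}.0.0".format(datestamp[2:6])    # YYMM plus ABI/patch counters
    print(soversion)                               # -> 2407.0.0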