Upgrade crossbeam-utils to 0.8.19 am: 6e8d2d5a50

Original change: https://android-review.googlesource.com/c/platform/external/rust/crates/crossbeam-utils/+/2943531

Change-Id: I813bf8df4bbece4f9e239e30caa5977972318b9a
Signed-off-by: Automerger Merge Worker <[email protected]>
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
index 415facc..52ab2c1 100644
--- a/.cargo_vcs_info.json
+++ b/.cargo_vcs_info.json
@@ -1,6 +1,6 @@
 {
   "git": {
-    "sha1": "721382b00b5dadd81954ed66764d547e2f1bb7a3"
+    "sha1": "9c3182abebb36bdc9446d75d4644190fef70fa01"
   },
   "path_in_vcs": "crossbeam-utils"
 }
\ No newline at end of file
diff --git a/Android.bp b/Android.bp
index edd8df6..910f862 100644
--- a/Android.bp
+++ b/Android.bp
@@ -44,23 +44,19 @@
     host_supported: true,
     crate_name: "crossbeam_utils",
     cargo_env_compat: true,
-    cargo_pkg_version: "0.8.15",
+    cargo_pkg_version: "0.8.19",
     srcs: ["src/lib.rs"],
     test_suites: ["general-tests"],
     auto_gen_config: true,
     test_options: {
         unit_test: true,
     },
-    edition: "2018",
+    edition: "2021",
     features: [
         "default",
         "std",
     ],
-    rustlibs: [
-        "libcfg_if",
-        "librand",
-    ],
-    proc_macros: ["librustversion"],
+    rustlibs: ["librand"],
 }
 
 rust_test {
@@ -68,24 +64,22 @@
     host_supported: true,
     crate_name: "atomic_cell",
     cargo_env_compat: true,
-    cargo_pkg_version: "0.8.15",
+    cargo_pkg_version: "0.8.19",
     srcs: ["tests/atomic_cell.rs"],
     test_suites: ["general-tests"],
     auto_gen_config: true,
     test_options: {
         unit_test: true,
     },
-    edition: "2018",
+    edition: "2021",
     features: [
         "default",
         "std",
     ],
     rustlibs: [
-        "libcfg_if",
         "libcrossbeam_utils",
         "librand",
     ],
-    proc_macros: ["librustversion"],
 }
 
 rust_test {
@@ -93,24 +87,22 @@
     host_supported: true,
     crate_name: "cache_padded",
     cargo_env_compat: true,
-    cargo_pkg_version: "0.8.15",
+    cargo_pkg_version: "0.8.19",
     srcs: ["tests/cache_padded.rs"],
     test_suites: ["general-tests"],
     auto_gen_config: true,
     test_options: {
         unit_test: true,
     },
-    edition: "2018",
+    edition: "2021",
     features: [
         "default",
         "std",
     ],
     rustlibs: [
-        "libcfg_if",
         "libcrossbeam_utils",
         "librand",
     ],
-    proc_macros: ["librustversion"],
 }
 
 rust_test {
@@ -118,24 +110,22 @@
     host_supported: true,
     crate_name: "parker",
     cargo_env_compat: true,
-    cargo_pkg_version: "0.8.15",
+    cargo_pkg_version: "0.8.19",
     srcs: ["tests/parker.rs"],
     test_suites: ["general-tests"],
     auto_gen_config: true,
     test_options: {
         unit_test: true,
     },
-    edition: "2018",
+    edition: "2021",
     features: [
         "default",
         "std",
     ],
     rustlibs: [
-        "libcfg_if",
         "libcrossbeam_utils",
         "librand",
     ],
-    proc_macros: ["librustversion"],
 }
 
 rust_test {
@@ -143,24 +133,22 @@
     host_supported: true,
     crate_name: "sharded_lock",
     cargo_env_compat: true,
-    cargo_pkg_version: "0.8.15",
+    cargo_pkg_version: "0.8.19",
     srcs: ["tests/sharded_lock.rs"],
     test_suites: ["general-tests"],
     auto_gen_config: true,
     test_options: {
         unit_test: true,
     },
-    edition: "2018",
+    edition: "2021",
     features: [
         "default",
         "std",
     ],
     rustlibs: [
-        "libcfg_if",
         "libcrossbeam_utils",
         "librand",
     ],
-    proc_macros: ["librustversion"],
 }
 
 rust_test {
@@ -168,24 +156,22 @@
     host_supported: true,
     crate_name: "thread",
     cargo_env_compat: true,
-    cargo_pkg_version: "0.8.15",
+    cargo_pkg_version: "0.8.19",
     srcs: ["tests/thread.rs"],
     test_suites: ["general-tests"],
     auto_gen_config: true,
     test_options: {
         unit_test: true,
     },
-    edition: "2018",
+    edition: "2021",
     features: [
         "default",
         "std",
     ],
     rustlibs: [
-        "libcfg_if",
         "libcrossbeam_utils",
         "librand",
     ],
-    proc_macros: ["librustversion"],
 }
 
 rust_test {
@@ -193,24 +179,22 @@
     host_supported: true,
     crate_name: "wait_group",
     cargo_env_compat: true,
-    cargo_pkg_version: "0.8.15",
+    cargo_pkg_version: "0.8.19",
     srcs: ["tests/wait_group.rs"],
     test_suites: ["general-tests"],
     auto_gen_config: true,
     test_options: {
         unit_test: true,
     },
-    edition: "2018",
+    edition: "2021",
     features: [
         "default",
         "std",
     ],
     rustlibs: [
-        "libcfg_if",
         "libcrossbeam_utils",
         "librand",
     ],
-    proc_macros: ["librustversion"],
 }
 
 rust_library {
@@ -218,14 +202,13 @@
     host_supported: true,
     crate_name: "crossbeam_utils",
     cargo_env_compat: true,
-    cargo_pkg_version: "0.8.15",
+    cargo_pkg_version: "0.8.19",
     srcs: ["src/lib.rs"],
-    edition: "2018",
+    edition: "2021",
     features: [
         "default",
         "std",
     ],
-    rustlibs: ["libcfg_if"],
     apex_available: [
         "//apex_available:platform",
         "com.android.virt",
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 994b6c3..f6f3463 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,26 @@
+# Version 0.8.19
+
+- Remove dependency on `cfg-if`. (#1072)
+
+# Version 0.8.18
+
+- Relax the minimum supported Rust version to 1.60. (#1056)
+- Improve scalability of `AtomicCell` fallback. (#1055)
+
+# Version 0.8.17
+
+- Bump the minimum supported Rust version to 1.61. (#1037)
+- Improve support for targets without atomic CAS or 64-bit atomic. (#1037)
+- Always implement `UnwindSafe` and `RefUnwindSafe` for `AtomicCell`. (#1045)
+- Improve compatibility with Miri, TSan, and loom. (#995, #1003)
+- Improve compatibility with unstable `oom=panic`. (#1045)
+- Improve implementation of `CachePadded`. (#1014, #1025)
+- Update `loom` dependency to 0.7.
+
+# Version 0.8.16
+
+- Improve implementation of `CachePadded`. (#967)
+
 # Version 0.8.15
 
 - Add `#[clippy::has_significant_drop]` to `ShardedLock{Read,Write}Guard`. (#958)
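
For context on these entries, a minimal usage sketch of the `AtomicCell` type they keep referring to; behavior shown assumes a typical target with native 32-bit atomics:

```rust
use crossbeam_utils::atomic::AtomicCell;

fn main() {
    let a = AtomicCell::new(7i32);
    assert_eq!(a.fetch_add(1), 7); // fetch_* operations return the previous value
    assert_eq!(a.load(), 8);
    // Backed by a native AtomicU32 here, so no fallback lock is taken:
    assert!(AtomicCell::<i32>::is_lock_free());
}
```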
diff --git a/Cargo.toml b/Cargo.toml
index a99b591..180f721 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -10,10 +10,10 @@
 # See Cargo.toml.orig for the original contents.
 
 [package]
-edition = "2018"
-rust-version = "1.38"
+edition = "2021"
+rust-version = "1.60"
 name = "crossbeam-utils"
-version = "0.8.15"
+version = "0.8.19"
 description = "Utilities for concurrent programming"
 homepage = "https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-utils"
 readme = "README.md"
@@ -32,20 +32,16 @@
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/crossbeam-rs/crossbeam"
 
-[dependencies.cfg-if]
-version = "1"
+[dependencies]
 
 [dev-dependencies.rand]
 version = "0.8"
 
-[dev-dependencies.rustversion]
-version = "1"
-
 [features]
 default = ["std"]
 nightly = []
 std = []
 
 [target."cfg(crossbeam_loom)".dependencies.loom]
-version = "0.5"
+version = "0.7.1"
 optional = true
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
index 155fcc1..b9a2756 100644
--- a/Cargo.toml.orig
+++ b/Cargo.toml.orig
@@ -4,9 +4,9 @@
 # - Update CHANGELOG.md
 # - Update README.md
 # - Create "crossbeam-utils-X.Y.Z" git tag
-version = "0.8.15"
-edition = "2018"
-rust-version = "1.38"
+version = "0.8.19"
+edition = "2021"
+rust-version = "1.60"
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/crossbeam-rs/crossbeam"
 homepage = "https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-utils"
@@ -31,15 +31,13 @@
 nightly = []
 
 [dependencies]
-cfg-if = "1"
 
 # Enable the use of loom for concurrency testing.
 #
 # NOTE: This feature is outside of the normal semver guarantees and minor or
 # patch versions of crossbeam may make breaking changes to them at any time.
 [target.'cfg(crossbeam_loom)'.dependencies]
-loom = { version = "0.5", optional = true }
+loom = { version = "0.7.1", optional = true }
 
 [dev-dependencies]
 rand = "0.8"
-rustversion = "1"
diff --git a/METADATA b/METADATA
index 8121ef5..7ae191c 100644
--- a/METADATA
+++ b/METADATA
@@ -1,23 +1,20 @@
 # This project was upgraded with external_updater.
-# Usage: tools/external_updater/updater.sh update rust/crates/crossbeam-utils
-# For more info, check https://cs.android.com/android/platform/superproject/+/master:tools/external_updater/README.md
+# Usage: tools/external_updater/updater.sh update external/rust/crates/crossbeam-utils
+# For more info, check https://cs.android.com/android/platform/superproject/+/main:tools/external_updater/README.md
 
 name: "crossbeam-utils"
 description: "Utilities for concurrent programming"
 third_party {
-  url {
-    type: HOMEPAGE
-    value: "https://crates.io/crates/crossbeam-utils"
-  }
-  url {
-    type: ARCHIVE
-    value: "https://static.crates.io/crates/crossbeam-utils/crossbeam-utils-0.8.15.crate"
-  }
-  version: "0.8.15"
   license_type: NOTICE
   last_upgrade_date {
-    year: 2023
-    month: 3
-    day: 6
+    year: 2024
+    month: 2
+    day: 1
+  }
+  homepage: "https://crates.io/crates/crossbeam-utils"
+  identifier {
+    type: "Archive"
+    value: "https://static.crates.io/crates/crossbeam-utils/crossbeam-utils-0.8.19.crate"
+    version: "0.8.19"
   }
 }
diff --git a/README.md b/README.md
index c06ea60..7d6a679 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,7 @@
 https://crates.io/crates/crossbeam-utils)
 [![Documentation](https://docs.rs/crossbeam-utils/badge.svg)](
 https://docs.rs/crossbeam-utils)
-[![Rust 1.38+](https://img.shields.io/badge/rust-1.38+-lightgray.svg)](
+[![Rust 1.60+](https://img.shields.io/badge/rust-1.60+-lightgray.svg)](
 https://www.rust-lang.org)
 [![chat](https://img.shields.io/discord/569610676205781012.svg?logo=discord)](https://discord.com/invite/JXYwgWZ)
 
@@ -55,7 +55,7 @@
 
 Crossbeam Utils supports stable Rust releases going back at least six months,
 and every time the minimum supported Rust version is increased, a new minor
-version is released. Currently, the minimum supported Rust version is 1.38.
+version is released. Currently, the minimum supported Rust version is 1.60.
 
 ## License
 
diff --git a/build.rs b/build.rs
index 617162f..c71c231 100644
--- a/build.rs
+++ b/build.rs
@@ -1,24 +1,12 @@
 // The rustc-cfg listed below are considered public API, but they are *unstable*
 // and outside of the normal semver guarantees:
 //
-// - `crossbeam_no_atomic_cas`
-//      Assume the target does *not* support atomic CAS operations.
-//      This is usually detected automatically by the build script, but you may
-//      need to enable it manually when building for custom targets or using
-//      non-cargo build systems that don't run the build script.
-//
 // - `crossbeam_no_atomic`
 //      Assume the target does *not* support any atomic operations.
 //      This is usually detected automatically by the build script, but you may
 //      need to enable it manually when building for custom targets or using
 //      non-cargo build systems that don't run the build script.
 //
-// - `crossbeam_no_atomic_64`
-//      Assume the target does *not* support AtomicU64/AtomicI64.
-//      This is usually detected automatically by the build script, but you may
-//      need to enable it manually when building for custom targets or using
-//      non-cargo build systems that don't run the build script.
-//
 // With the exceptions mentioned above, the rustc-cfg emitted by the build
 // script are *not* public API.
 
@@ -30,6 +18,8 @@
 include!("build-common.rs");
 
 fn main() {
+    println!("cargo:rerun-if-changed=no_atomic.rs");
+
     let target = match env::var("TARGET") {
         Ok(target) => convert_custom_linux_target(target),
         Err(e) => {
@@ -45,17 +35,13 @@
 // Note that this is `no_*`, not `has_*`. This allows the build to behave as
 // if the latest stable rustc were used when the build script doesn't run.
 // This is useful for non-cargo build systems that don't run the build script.
-    if NO_ATOMIC_CAS.contains(&&*target) {
-        println!("cargo:rustc-cfg=crossbeam_no_atomic_cas");
-    }
     if NO_ATOMIC.contains(&&*target) {
         println!("cargo:rustc-cfg=crossbeam_no_atomic");
-        println!("cargo:rustc-cfg=crossbeam_no_atomic_64");
-    } else if NO_ATOMIC_64.contains(&&*target) {
-        println!("cargo:rustc-cfg=crossbeam_no_atomic_64");
-    } else {
-        // Otherwise, assuming `"max-atomic-width" == 64` or `"max-atomic-width" == 128`.
     }
 
-    println!("cargo:rerun-if-changed=no_atomic.rs");
+    // `cfg(sanitize = "..")` is not stabilized.
+    let sanitize = env::var("CARGO_CFG_SANITIZE").unwrap_or_default();
+    if sanitize.contains("thread") {
+        println!("cargo:rustc-cfg=crossbeam_sanitize_thread");
+    }
 }
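
The two removed cfgs are superseded by rustc's stable `target_has_atomic` predicate (available since the new MSRV of 1.60), and the added block gives the crate a way to detect ThreadSanitizer builds. A hedged sketch of how source code can branch on the stable predicate; the function is illustrative, not part of the crate:

```rust
// Illustrative only: stable `target_has_atomic` replaces the removed
// crossbeam_no_atomic_cas / crossbeam_no_atomic_64 build-script cfgs.
#[cfg(target_has_atomic = "64")]
fn has_native_u64_atomics() -> bool {
    true
}

#[cfg(not(target_has_atomic = "64"))]
fn has_native_u64_atomics() -> bool {
    false // crossbeam falls back to a seqlock-protected path instead
}

fn main() {
    println!("native 64-bit atomics: {}", has_native_u64_atomics());
}
```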
diff --git a/no_atomic.rs b/no_atomic.rs
index beb11b0..b97f397 100644
--- a/no_atomic.rs
+++ b/no_atomic.rs
@@ -1,80 +1,7 @@
 // This file is @generated by no_atomic.sh.
 // It is not intended for manual editing.
 
-const NO_ATOMIC_CAS: &[&str] = &[
-    "armv4t-none-eabi",
-    "armv5te-none-eabi",
-    "avr-unknown-gnu-atmega328",
-    "bpfeb-unknown-none",
-    "bpfel-unknown-none",
-    "msp430-none-elf",
-    "riscv32i-unknown-none-elf",
-    "riscv32im-unknown-none-elf",
-    "riscv32imc-unknown-none-elf",
-    "thumbv4t-none-eabi",
-    "thumbv5te-none-eabi",
-    "thumbv6m-none-eabi",
-];
-
-#[allow(dead_code)] // Only crossbeam-utils uses this.
-const NO_ATOMIC_64: &[&str] = &[
-    "arm-linux-androideabi",
-    "armebv7r-none-eabi",
-    "armebv7r-none-eabihf",
-    "armv4t-none-eabi",
-    "armv4t-unknown-linux-gnueabi",
-    "armv5te-none-eabi",
-    "armv5te-unknown-linux-gnueabi",
-    "armv5te-unknown-linux-musleabi",
-    "armv5te-unknown-linux-uclibceabi",
-    "armv6k-nintendo-3ds",
-    "armv7-sony-vita-newlibeabihf",
-    "armv7r-none-eabi",
-    "armv7r-none-eabihf",
-    "avr-unknown-gnu-atmega328",
-    "hexagon-unknown-linux-musl",
-    "m68k-unknown-linux-gnu",
-    "mips-unknown-linux-gnu",
-    "mips-unknown-linux-musl",
-    "mips-unknown-linux-uclibc",
-    "mipsel-sony-psp",
-    "mipsel-sony-psx",
-    "mipsel-unknown-linux-gnu",
-    "mipsel-unknown-linux-musl",
-    "mipsel-unknown-linux-uclibc",
-    "mipsel-unknown-none",
-    "mipsisa32r6-unknown-linux-gnu",
-    "mipsisa32r6el-unknown-linux-gnu",
-    "msp430-none-elf",
-    "powerpc-unknown-freebsd",
-    "powerpc-unknown-linux-gnu",
-    "powerpc-unknown-linux-gnuspe",
-    "powerpc-unknown-linux-musl",
-    "powerpc-unknown-netbsd",
-    "powerpc-unknown-openbsd",
-    "powerpc-wrs-vxworks",
-    "powerpc-wrs-vxworks-spe",
-    "riscv32gc-unknown-linux-gnu",
-    "riscv32gc-unknown-linux-musl",
-    "riscv32i-unknown-none-elf",
-    "riscv32im-unknown-none-elf",
-    "riscv32imac-unknown-none-elf",
-    "riscv32imac-unknown-xous-elf",
-    "riscv32imc-unknown-none-elf",
-    "thumbv4t-none-eabi",
-    "thumbv5te-none-eabi",
-    "thumbv6m-none-eabi",
-    "thumbv7em-none-eabi",
-    "thumbv7em-none-eabihf",
-    "thumbv7m-none-eabi",
-    "thumbv8m.base-none-eabi",
-    "thumbv8m.main-none-eabi",
-    "thumbv8m.main-none-eabihf",
-];
-
-#[allow(dead_code)] // Only crossbeam-utils uses this.
 const NO_ATOMIC: &[&str] = &[
-    "avr-unknown-gnu-atmega328",
     "bpfeb-unknown-none",
     "bpfel-unknown-none",
     "mipsel-sony-psx",
diff --git a/src/atomic/atomic_cell.rs b/src/atomic/atomic_cell.rs
index 7941c5c..06ccf2e 100644
--- a/src/atomic/atomic_cell.rs
+++ b/src/atomic/atomic_cell.rs
@@ -1,18 +1,15 @@
 // Necessary for implementing atomic methods for `AtomicUnit`
 #![allow(clippy::unit_arg)]
 
-use crate::primitive::sync::atomic::{self, AtomicBool};
+use crate::primitive::sync::atomic::{self, Ordering};
+use crate::CachePadded;
 use core::cell::UnsafeCell;
 use core::cmp;
 use core::fmt;
 use core::mem::{self, ManuallyDrop, MaybeUninit};
-use core::sync::atomic::Ordering;
-
+use core::panic::{RefUnwindSafe, UnwindSafe};
 use core::ptr;
 
-#[cfg(feature = "std")]
-use std::panic::{RefUnwindSafe, UnwindSafe};
-
 use super::seq_lock::SeqLock;
 
 /// A thread-safe mutable memory location.
@@ -49,9 +46,7 @@
 unsafe impl<T: Send> Send for AtomicCell<T> {}
 unsafe impl<T: Send> Sync for AtomicCell<T> {}
 
-#[cfg(feature = "std")]
 impl<T> UnwindSafe for AtomicCell<T> {}
-#[cfg(feature = "std")]
 impl<T> RefUnwindSafe for AtomicCell<T> {}
 
 impl<T> AtomicCell<T> {
@@ -322,6 +317,36 @@
     }
 }
 
+macro_rules! atomic {
+    // If values of type `$t` can be transmuted into values of the primitive atomic type `$atomic`,
+    // declares variable `$a` of type `$atomic` and executes `$atomic_op`, breaking out of the loop.
+    (@check, $t:ty, $atomic:ty, $a:ident, $atomic_op:expr) => {
+        if can_transmute::<$t, $atomic>() {
+            let $a: &$atomic;
+            break $atomic_op;
+        }
+    };
+
+    // If values of type `$t` can be transmuted into values of a primitive atomic type, declares
+    // variable `$a` of that type and executes `$atomic_op`. Otherwise, just executes
+    // `$fallback_op`.
+    ($t:ty, $a:ident, $atomic_op:expr, $fallback_op:expr) => {
+        loop {
+            atomic!(@check, $t, AtomicUnit, $a, $atomic_op);
+
+            atomic!(@check, $t, atomic::AtomicU8, $a, $atomic_op);
+            atomic!(@check, $t, atomic::AtomicU16, $a, $atomic_op);
+            atomic!(@check, $t, atomic::AtomicU32, $a, $atomic_op);
+            #[cfg(target_has_atomic = "64")]
+            atomic!(@check, $t, atomic::AtomicU64, $a, $atomic_op);
+            // TODO: AtomicU128 is unstable
+            // atomic!(@check, $t, atomic::AtomicU128, $a, $atomic_op);
+
+            break $fallback_op;
+        }
+    };
+}
+
 macro_rules! impl_arithmetic {
     ($t:ty, fallback, $example:tt) => {
         impl AtomicCell<$t> {
@@ -500,7 +525,7 @@
             }
         }
     };
-    ($t:ty, $atomic:ty, $example:tt) => {
+    ($t:ty, $atomic:ident, $example:tt) => {
         impl AtomicCell<$t> {
             /// Increments the current value by `val` and returns the previous value.
             ///
@@ -518,15 +543,19 @@
             /// ```
             #[inline]
             pub fn fetch_add(&self, val: $t) -> $t {
-                if can_transmute::<$t, $atomic>() {
-                    let a = unsafe { &*(self.as_ptr() as *const $atomic) };
-                    a.fetch_add(val, Ordering::AcqRel)
-                } else {
-                    let _guard = lock(self.as_ptr() as usize).write();
-                    let value = unsafe { &mut *(self.as_ptr()) };
-                    let old = *value;
-                    *value = value.wrapping_add(val);
-                    old
+                atomic! {
+                    $t, _a,
+                    {
+                        let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) };
+                        a.fetch_add(val, Ordering::AcqRel)
+                    },
+                    {
+                        let _guard = lock(self.as_ptr() as usize).write();
+                        let value = unsafe { &mut *(self.as_ptr()) };
+                        let old = *value;
+                        *value = value.wrapping_add(val);
+                        old
+                    }
                 }
             }
 
@@ -546,15 +575,19 @@
             /// ```
             #[inline]
             pub fn fetch_sub(&self, val: $t) -> $t {
-                if can_transmute::<$t, $atomic>() {
-                    let a = unsafe { &*(self.as_ptr() as *const $atomic) };
-                    a.fetch_sub(val, Ordering::AcqRel)
-                } else {
-                    let _guard = lock(self.as_ptr() as usize).write();
-                    let value = unsafe { &mut *(self.as_ptr()) };
-                    let old = *value;
-                    *value = value.wrapping_sub(val);
-                    old
+                atomic! {
+                    $t, _a,
+                    {
+                        let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) };
+                        a.fetch_sub(val, Ordering::AcqRel)
+                    },
+                    {
+                        let _guard = lock(self.as_ptr() as usize).write();
+                        let value = unsafe { &mut *(self.as_ptr()) };
+                        let old = *value;
+                        *value = value.wrapping_sub(val);
+                        old
+                    }
                 }
             }
 
@@ -572,15 +605,19 @@
             /// ```
             #[inline]
             pub fn fetch_and(&self, val: $t) -> $t {
-                if can_transmute::<$t, $atomic>() {
-                    let a = unsafe { &*(self.as_ptr() as *const $atomic) };
-                    a.fetch_and(val, Ordering::AcqRel)
-                } else {
-                    let _guard = lock(self.as_ptr() as usize).write();
-                    let value = unsafe { &mut *(self.as_ptr()) };
-                    let old = *value;
-                    *value &= val;
-                    old
+                atomic! {
+                    $t, _a,
+                    {
+                        let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) };
+                        a.fetch_and(val, Ordering::AcqRel)
+                    },
+                    {
+                        let _guard = lock(self.as_ptr() as usize).write();
+                        let value = unsafe { &mut *(self.as_ptr()) };
+                        let old = *value;
+                        *value &= val;
+                        old
+                    }
                 }
             }
 
@@ -598,15 +635,19 @@
             /// ```
             #[inline]
             pub fn fetch_nand(&self, val: $t) -> $t {
-                if can_transmute::<$t, $atomic>() {
-                    let a = unsafe { &*(self.as_ptr() as *const $atomic) };
-                    a.fetch_nand(val, Ordering::AcqRel)
-                } else {
-                    let _guard = lock(self.as_ptr() as usize).write();
-                    let value = unsafe { &mut *(self.as_ptr()) };
-                    let old = *value;
-                    *value = !(old & val);
-                    old
+                atomic! {
+                    $t, _a,
+                    {
+                        let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) };
+                        a.fetch_nand(val, Ordering::AcqRel)
+                    },
+                    {
+                        let _guard = lock(self.as_ptr() as usize).write();
+                        let value = unsafe { &mut *(self.as_ptr()) };
+                        let old = *value;
+                        *value = !(old & val);
+                        old
+                    }
                 }
             }
 
@@ -624,15 +665,19 @@
             /// ```
             #[inline]
             pub fn fetch_or(&self, val: $t) -> $t {
-                if can_transmute::<$t, $atomic>() {
-                    let a = unsafe { &*(self.as_ptr() as *const $atomic) };
-                    a.fetch_or(val, Ordering::AcqRel)
-                } else {
-                    let _guard = lock(self.as_ptr() as usize).write();
-                    let value = unsafe { &mut *(self.as_ptr()) };
-                    let old = *value;
-                    *value |= val;
-                    old
+                atomic! {
+                    $t, _a,
+                    {
+                        let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) };
+                        a.fetch_or(val, Ordering::AcqRel)
+                    },
+                    {
+                        let _guard = lock(self.as_ptr() as usize).write();
+                        let value = unsafe { &mut *(self.as_ptr()) };
+                        let old = *value;
+                        *value |= val;
+                        old
+                    }
                 }
             }
 
@@ -650,15 +695,19 @@
             /// ```
             #[inline]
             pub fn fetch_xor(&self, val: $t) -> $t {
-                if can_transmute::<$t, $atomic>() {
-                    let a = unsafe { &*(self.as_ptr() as *const $atomic) };
-                    a.fetch_xor(val, Ordering::AcqRel)
-                } else {
-                    let _guard = lock(self.as_ptr() as usize).write();
-                    let value = unsafe { &mut *(self.as_ptr()) };
-                    let old = *value;
-                    *value ^= val;
-                    old
+                atomic! {
+                    $t, _a,
+                    {
+                        let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) };
+                        a.fetch_xor(val, Ordering::AcqRel)
+                    },
+                    {
+                        let _guard = lock(self.as_ptr() as usize).write();
+                        let value = unsafe { &mut *(self.as_ptr()) };
+                        let old = *value;
+                        *value ^= val;
+                        old
+                    }
                 }
             }
 
@@ -677,15 +726,19 @@
             /// ```
             #[inline]
             pub fn fetch_max(&self, val: $t) -> $t {
-                if can_transmute::<$t, $atomic>() {
-                    // TODO: Atomic*::fetch_max requires Rust 1.45.
-                    self.fetch_update(|old| Some(cmp::max(old, val))).unwrap()
-                } else {
-                    let _guard = lock(self.as_ptr() as usize).write();
-                    let value = unsafe { &mut *(self.as_ptr()) };
-                    let old = *value;
-                    *value = cmp::max(old, val);
-                    old
+                atomic! {
+                    $t, _a,
+                    {
+                        let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) };
+                        a.fetch_max(val, Ordering::AcqRel)
+                    },
+                    {
+                        let _guard = lock(self.as_ptr() as usize).write();
+                        let value = unsafe { &mut *(self.as_ptr()) };
+                        let old = *value;
+                        *value = cmp::max(old, val);
+                        old
+                    }
                 }
             }
 
@@ -704,51 +757,50 @@
             /// ```
             #[inline]
             pub fn fetch_min(&self, val: $t) -> $t {
-                if can_transmute::<$t, $atomic>() {
-                    // TODO: Atomic*::fetch_min requires Rust 1.45.
-                    self.fetch_update(|old| Some(cmp::min(old, val))).unwrap()
-                } else {
-                    let _guard = lock(self.as_ptr() as usize).write();
-                    let value = unsafe { &mut *(self.as_ptr()) };
-                    let old = *value;
-                    *value = cmp::min(old, val);
-                    old
+                atomic! {
+                    $t, _a,
+                    {
+                        let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) };
+                        a.fetch_min(val, Ordering::AcqRel)
+                    },
+                    {
+                        let _guard = lock(self.as_ptr() as usize).write();
+                        let value = unsafe { &mut *(self.as_ptr()) };
+                        let old = *value;
+                        *value = cmp::min(old, val);
+                        old
+                    }
                 }
             }
         }
     };
 }
 
-impl_arithmetic!(u8, atomic::AtomicU8, "let a = AtomicCell::new(7u8);");
-impl_arithmetic!(i8, atomic::AtomicI8, "let a = AtomicCell::new(7i8);");
-impl_arithmetic!(u16, atomic::AtomicU16, "let a = AtomicCell::new(7u16);");
-impl_arithmetic!(i16, atomic::AtomicI16, "let a = AtomicCell::new(7i16);");
-impl_arithmetic!(u32, atomic::AtomicU32, "let a = AtomicCell::new(7u32);");
-impl_arithmetic!(i32, atomic::AtomicI32, "let a = AtomicCell::new(7i32);");
-#[cfg(not(crossbeam_no_atomic_64))]
-impl_arithmetic!(u64, atomic::AtomicU64, "let a = AtomicCell::new(7u64);");
-#[cfg(not(crossbeam_no_atomic_64))]
-impl_arithmetic!(i64, atomic::AtomicI64, "let a = AtomicCell::new(7i64);");
-#[cfg(crossbeam_no_atomic_64)]
+impl_arithmetic!(u8, AtomicU8, "let a = AtomicCell::new(7u8);");
+impl_arithmetic!(i8, AtomicI8, "let a = AtomicCell::new(7i8);");
+impl_arithmetic!(u16, AtomicU16, "let a = AtomicCell::new(7u16);");
+impl_arithmetic!(i16, AtomicI16, "let a = AtomicCell::new(7i16);");
+
+impl_arithmetic!(u32, AtomicU32, "let a = AtomicCell::new(7u32);");
+impl_arithmetic!(i32, AtomicI32, "let a = AtomicCell::new(7i32);");
+
+#[cfg(target_has_atomic = "64")]
+impl_arithmetic!(u64, AtomicU64, "let a = AtomicCell::new(7u64);");
+#[cfg(target_has_atomic = "64")]
+impl_arithmetic!(i64, AtomicI64, "let a = AtomicCell::new(7i64);");
+#[cfg(not(target_has_atomic = "64"))]
 impl_arithmetic!(u64, fallback, "let a = AtomicCell::new(7u64);");
-#[cfg(crossbeam_no_atomic_64)]
+#[cfg(not(target_has_atomic = "64"))]
 impl_arithmetic!(i64, fallback, "let a = AtomicCell::new(7i64);");
+
 // TODO: AtomicU128 is unstable
-// impl_arithmetic!(u128, atomic::AtomicU128, "let a = AtomicCell::new(7u128);");
-// impl_arithmetic!(i128, atomic::AtomicI128, "let a = AtomicCell::new(7i128);");
+// impl_arithmetic!(u128, AtomicU128, "let a = AtomicCell::new(7u128);");
+// impl_arithmetic!(i128, AtomicI128, "let a = AtomicCell::new(7i128);");
 impl_arithmetic!(u128, fallback, "let a = AtomicCell::new(7u128);");
 impl_arithmetic!(i128, fallback, "let a = AtomicCell::new(7i128);");
 
-impl_arithmetic!(
-    usize,
-    atomic::AtomicUsize,
-    "let a = AtomicCell::new(7usize);"
-);
-impl_arithmetic!(
-    isize,
-    atomic::AtomicIsize,
-    "let a = AtomicCell::new(7isize);"
-);
+impl_arithmetic!(usize, AtomicUsize, "let a = AtomicCell::new(7usize);");
+impl_arithmetic!(isize, AtomicIsize, "let a = AtomicCell::new(7isize);");
 
 impl AtomicCell<bool> {
     /// Applies logical "and" to the current value and returns the previous value.
@@ -768,8 +820,20 @@
     /// ```
     #[inline]
     pub fn fetch_and(&self, val: bool) -> bool {
-        let a = unsafe { &*(self.as_ptr() as *const AtomicBool) };
-        a.fetch_and(val, Ordering::AcqRel)
+        atomic! {
+            bool, _a,
+            {
+                let a = unsafe { &*(self.as_ptr() as *const atomic::AtomicBool) };
+                a.fetch_and(val, Ordering::AcqRel)
+            },
+            {
+                let _guard = lock(self.as_ptr() as usize).write();
+                let value = unsafe { &mut *(self.as_ptr()) };
+                let old = *value;
+                *value &= val;
+                old
+            }
+        }
     }
 
     /// Applies logical "nand" to the current value and returns the previous value.
@@ -792,8 +856,20 @@
     /// ```
     #[inline]
     pub fn fetch_nand(&self, val: bool) -> bool {
-        let a = unsafe { &*(self.as_ptr() as *const AtomicBool) };
-        a.fetch_nand(val, Ordering::AcqRel)
+        atomic! {
+            bool, _a,
+            {
+                let a = unsafe { &*(self.as_ptr() as *const atomic::AtomicBool) };
+                a.fetch_nand(val, Ordering::AcqRel)
+            },
+            {
+                let _guard = lock(self.as_ptr() as usize).write();
+                let value = unsafe { &mut *(self.as_ptr()) };
+                let old = *value;
+                *value = !(old & val);
+                old
+            }
+        }
     }
 
     /// Applies logical "or" to the current value and returns the previous value.
@@ -813,8 +889,20 @@
     /// ```
     #[inline]
     pub fn fetch_or(&self, val: bool) -> bool {
-        let a = unsafe { &*(self.as_ptr() as *const AtomicBool) };
-        a.fetch_or(val, Ordering::AcqRel)
+        atomic! {
+            bool, _a,
+            {
+                let a = unsafe { &*(self.as_ptr() as *const atomic::AtomicBool) };
+                a.fetch_or(val, Ordering::AcqRel)
+            },
+            {
+                let _guard = lock(self.as_ptr() as usize).write();
+                let value = unsafe { &mut *(self.as_ptr()) };
+                let old = *value;
+                *value |= val;
+                old
+            }
+        }
     }
 
     /// Applies logical "xor" to the current value and returns the previous value.
@@ -834,8 +922,20 @@
     /// ```
     #[inline]
     pub fn fetch_xor(&self, val: bool) -> bool {
-        let a = unsafe { &*(self.as_ptr() as *const AtomicBool) };
-        a.fetch_xor(val, Ordering::AcqRel)
+        atomic! {
+            bool, _a,
+            {
+                let a = unsafe { &*(self.as_ptr() as *const atomic::AtomicBool) };
+                a.fetch_xor(val, Ordering::AcqRel)
+            },
+            {
+                let _guard = lock(self.as_ptr() as usize).write();
+                let value = unsafe { &mut *(self.as_ptr()) };
+                let old = *value;
+                *value ^= val;
+                old
+            }
+        }
     }
 }
 
@@ -899,10 +999,10 @@
     // Now, if we have a slice of type `&[Foo]`, it is possible that field `a` in all items gets
     // stored at addresses that are multiples of 3. It'd be too bad if `LEN` was divisible by 3.
     // In order to protect from such cases, we simply choose a large prime number for `LEN`.
-    const LEN: usize = 97;
+    const LEN: usize = 67;
     #[allow(clippy::declare_interior_mutable_const)]
-    const L: SeqLock = SeqLock::new();
-    static LOCKS: [SeqLock; LEN] = [L; LEN];
+    const L: CachePadded<SeqLock> = CachePadded::new(SeqLock::new());
+    static LOCKS: [CachePadded<SeqLock>; LEN] = [L; LEN];
 
     // If the modulus is a constant number, the compiler will use crazy math to transform this into
     // a sequence of cheap arithmetic operations rather than using the slow modulo instruction.
@@ -936,48 +1036,9 @@
     }
 }
 
-macro_rules! atomic {
-    // If values of type `$t` can be transmuted into values of the primitive atomic type `$atomic`,
-    // declares variable `$a` of type `$atomic` and executes `$atomic_op`, breaking out of the loop.
-    (@check, $t:ty, $atomic:ty, $a:ident, $atomic_op:expr) => {
-        if can_transmute::<$t, $atomic>() {
-            let $a: &$atomic;
-            break $atomic_op;
-        }
-    };
-
-    // If values of type `$t` can be transmuted into values of a primitive atomic type, declares
-    // variable `$a` of that type and executes `$atomic_op`. Otherwise, just executes
-    // `$fallback_op`.
-    ($t:ty, $a:ident, $atomic_op:expr, $fallback_op:expr) => {
-        loop {
-            atomic!(@check, $t, AtomicUnit, $a, $atomic_op);
-
-            atomic!(@check, $t, atomic::AtomicU8, $a, $atomic_op);
-            atomic!(@check, $t, atomic::AtomicU16, $a, $atomic_op);
-            atomic!(@check, $t, atomic::AtomicU32, $a, $atomic_op);
-            #[cfg(not(crossbeam_no_atomic_64))]
-            atomic!(@check, $t, atomic::AtomicU64, $a, $atomic_op);
-            // TODO: AtomicU128 is unstable
-            // atomic!(@check, $t, atomic::AtomicU128, $a, $atomic_op);
-
-            break $fallback_op;
-        }
-    };
-}
-
 /// Returns `true` if operations on `AtomicCell<T>` are lock-free.
 const fn atomic_is_lock_free<T>() -> bool {
-    // HACK(taiki-e): This is equivalent to `atomic! { T, _a, true, false }`, but can be used in const fn even in our MSRV (Rust 1.38).
-    let is_lock_free = can_transmute::<T, AtomicUnit>()
-        | can_transmute::<T, atomic::AtomicU8>()
-        | can_transmute::<T, atomic::AtomicU16>()
-        | can_transmute::<T, atomic::AtomicU32>();
-    #[cfg(not(crossbeam_no_atomic_64))]
-    let is_lock_free = is_lock_free | can_transmute::<T, atomic::AtomicU64>();
-    // TODO: AtomicU128 is unstable
-    // let is_lock_free = is_lock_free | can_transmute::<T, atomic::AtomicU128>();
-    is_lock_free
+    atomic! { T, _a, true, false }
 }
 
 /// Atomically reads data from `src`.
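
The `atomic!` macro above dispatches on `can_transmute`, a const layout check defined in this file: a `T` can be backed by a primitive atomic if the sizes match and `T` is at least as aligned. A self-contained sketch of that check and the pointer cast it justifies:

```rust
use core::mem;
use core::sync::atomic::{AtomicU32, Ordering};

// Sketch of the layout test behind `atomic!`: equal size, sufficient alignment.
const fn can_transmute<A, B>() -> bool {
    mem::size_of::<A>() == mem::size_of::<B>() && mem::align_of::<A>() >= mem::align_of::<B>()
}

fn main() {
    assert!(can_transmute::<u32, AtomicU32>());
    assert!(!can_transmute::<[u8; 4], AtomicU32>()); // align 1 < align 4

    // When the check passes, the cell's storage can be read through a cast:
    let x: u32 = 1;
    let a = unsafe { &*(&x as *const u32 as *const AtomicU32) };
    assert_eq!(a.load(Ordering::Relaxed), 1);
}
```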
diff --git a/src/atomic/consume.rs b/src/atomic/consume.rs
index 277b370..ff8e316 100644
--- a/src/atomic/consume.rs
+++ b/src/atomic/consume.rs
@@ -1,5 +1,3 @@
-#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
-use crate::primitive::sync::atomic::compiler_fence;
 #[cfg(not(crossbeam_no_atomic))]
 use core::sync::atomic::Ordering;
 
@@ -27,11 +25,21 @@
 }
 
 #[cfg(not(crossbeam_no_atomic))]
-#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
+// Miri and Loom don't support "consume" ordering, and ThreadSanitizer doesn't treat
+// load(Relaxed) + compiler_fence(Acquire) as a "consume" load.
+// LLVM generates machine code equivalent to fence(Acquire) for compiler_fence(Acquire)
+// on PowerPC, MIPS, etc. (https://godbolt.org/z/hffvjvW7h), so for now the fence
+// can actually be avoided here only on ARM and AArch64. See also
+// https://github.com/rust-lang/rust/issues/62256.
+#[cfg(all(
+    any(target_arch = "arm", target_arch = "aarch64"),
+    not(any(miri, crossbeam_loom, crossbeam_sanitize_thread)),
+))]
 macro_rules! impl_consume {
     () => {
         #[inline]
         fn load_consume(&self) -> Self::Val {
+            use crate::primitive::sync::atomic::compiler_fence;
             let result = self.load(Ordering::Relaxed);
             compiler_fence(Ordering::Acquire);
             result
@@ -40,7 +48,10 @@
 }
 
 #[cfg(not(crossbeam_no_atomic))]
-#[cfg(not(any(target_arch = "arm", target_arch = "aarch64")))]
+#[cfg(not(all(
+    any(target_arch = "arm", target_arch = "aarch64"),
+    not(any(miri, crossbeam_loom, crossbeam_sanitize_thread)),
+)))]
 macro_rules! impl_consume {
     () => {
         #[inline]
@@ -72,11 +83,19 @@
 impl_atomic!(AtomicI8, i8);
 impl_atomic!(AtomicU16, u16);
 impl_atomic!(AtomicI16, i16);
+#[cfg(any(target_has_atomic = "32", not(target_pointer_width = "16")))]
 impl_atomic!(AtomicU32, u32);
+#[cfg(any(target_has_atomic = "32", not(target_pointer_width = "16")))]
 impl_atomic!(AtomicI32, i32);
-#[cfg(not(crossbeam_no_atomic_64))]
+#[cfg(any(
+    target_has_atomic = "64",
+    not(any(target_pointer_width = "16", target_pointer_width = "32")),
+))]
 impl_atomic!(AtomicU64, u64);
-#[cfg(not(crossbeam_no_atomic_64))]
+#[cfg(any(
+    target_has_atomic = "64",
+    not(any(target_pointer_width = "16", target_pointer_width = "32")),
+))]
 impl_atomic!(AtomicI64, i64);
 
 #[cfg(not(crossbeam_no_atomic))]
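
The fast path kept above for ARM/AArch64 is a relaxed load followed by a compiler-only fence; those architectures order dependent loads in hardware, so no fence instruction is emitted. A standalone sketch of what `load_consume` expands to there:

```rust
use std::sync::atomic::{compiler_fence, AtomicUsize, Ordering};

// On ARM/AArch64, hardware dependency ordering makes this as strong as an
// acquire load for reads that depend on the result, at zero fence cost.
fn load_consume(a: &AtomicUsize) -> usize {
    let result = a.load(Ordering::Relaxed);
    compiler_fence(Ordering::Acquire);
    result
}

fn main() {
    let a = AtomicUsize::new(42);
    assert_eq!(load_consume(&a), 42);
}
```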
diff --git a/src/atomic/mod.rs b/src/atomic/mod.rs
index 3896785..7b39fe4 100644
--- a/src/atomic/mod.rs
+++ b/src/atomic/mod.rs
@@ -3,35 +3,30 @@
 //! * [`AtomicCell`], a thread-safe mutable memory location.
 //! * [`AtomicConsume`], for reading from primitive atomic types with "consume" ordering.
 
-#[cfg(not(crossbeam_no_atomic_cas))]
+#[cfg(target_has_atomic = "ptr")]
 #[cfg(not(crossbeam_loom))]
-cfg_if::cfg_if! {
-    // Use "wide" sequence lock if the pointer width <= 32 for preventing its counter against wrap
-    // around.
-    //
-    // We are ignoring too wide architectures (pointer width >= 256), since such a system will not
-    // appear in a conceivable future.
-    //
-    // In narrow architectures (pointer width <= 16), the counter is still <= 32-bit and may be
-    // vulnerable to wrap around. But it's mostly okay, since in such a primitive hardware, the
-    // counter will not be increased that fast.
-    if #[cfg(any(target_pointer_width = "64", target_pointer_width = "128"))] {
-        mod seq_lock;
-    } else {
-        #[path = "seq_lock_wide.rs"]
-        mod seq_lock;
-    }
-}
+// Use "wide" sequence lock if the pointer width <= 32 for preventing its counter against wrap
+// around.
+//
+// In narrow architectures (pointer width <= 16), the counter is still <= 32-bit and may be
+// vulnerable to wrap around. But it's mostly okay, since in such a primitive hardware, the
+// counter will not be increased that fast.
+// Note that Rust (and C99) pointers must be at least 16-bits: https://github.com/rust-lang/rust/pull/49305
+#[cfg_attr(
+    any(target_pointer_width = "16", target_pointer_width = "32"),
+    path = "seq_lock_wide.rs"
+)]
+mod seq_lock;
 
-#[cfg(not(crossbeam_no_atomic_cas))]
+#[cfg(target_has_atomic = "ptr")]
 // We cannot provide AtomicCell under cfg(crossbeam_loom) because loom's atomic
 // types have a different in-memory representation than the underlying type.
 // TODO: The latest loom supports fences, so fallback using seqlock may be available.
 #[cfg(not(crossbeam_loom))]
 mod atomic_cell;
-mod consume;
-
-#[cfg(not(crossbeam_no_atomic_cas))]
+#[cfg(target_has_atomic = "ptr")]
 #[cfg(not(crossbeam_loom))]
-pub use self::atomic_cell::AtomicCell;
-pub use self::consume::AtomicConsume;
+pub use atomic_cell::AtomicCell;
+
+mod consume;
+pub use consume::AtomicConsume;
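
The `cfg_if!` wrapper is replaced by the built-in `#[cfg_attr(..., path = "...")]` attribute, which swaps a different source file in under the same module name. A single-file sketch of what the selection achieves, with cfg-gated inline modules standing in for the two files:

```rust
// Single-file stand-in for the seq_lock / seq_lock_wide file swap above:
// one module name, two cfg-selected implementations.
#[cfg(any(target_pointer_width = "16", target_pointer_width = "32"))]
mod seq_lock {
    // "wide" variant: keeps a 64-bit timestamp even on narrow pointers
    pub type Stamp = u64;
}

#[cfg(not(any(target_pointer_width = "16", target_pointer_width = "32")))]
mod seq_lock {
    pub type Stamp = usize;
}

fn main() {
    println!("stamp width: {} bits", 8 * std::mem::size_of::<seq_lock::Stamp>());
}
```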
diff --git a/src/backoff.rs b/src/backoff.rs
index 9e256aa..7a505ed 100644
--- a/src/backoff.rs
+++ b/src/backoff.rs
@@ -1,4 +1,4 @@
-use crate::primitive::sync::atomic;
+use crate::primitive::hint;
 use core::cell::Cell;
 use core::fmt;
 
@@ -145,10 +145,7 @@
     #[inline]
     pub fn spin(&self) {
         for _ in 0..1 << self.step.get().min(SPIN_LIMIT) {
-            // TODO(taiki-e): once we bump the minimum required Rust version to 1.49+,
-            // use [`core::hint::spin_loop`] instead.
-            #[allow(deprecated)]
-            atomic::spin_loop_hint();
+            hint::spin_loop();
         }
 
         if self.step.get() <= SPIN_LIMIT {
@@ -209,18 +206,12 @@
     pub fn snooze(&self) {
         if self.step.get() <= SPIN_LIMIT {
             for _ in 0..1 << self.step.get() {
-                // TODO(taiki-e): once we bump the minimum required Rust version to 1.49+,
-                // use [`core::hint::spin_loop`] instead.
-                #[allow(deprecated)]
-                atomic::spin_loop_hint();
+                hint::spin_loop();
             }
         } else {
             #[cfg(not(feature = "std"))]
             for _ in 0..1 << self.step.get() {
-                // TODO(taiki-e): once we bump the minimum required Rust version to 1.49+,
-                // use [`core::hint::spin_loop`] instead.
-                #[allow(deprecated)]
-                atomic::spin_loop_hint();
+                hint::spin_loop();
             }
 
             #[cfg(feature = "std")]
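
With the MSRV at 1.60, `Backoff` can call `core::hint::spin_loop()` directly instead of the deprecated `spin_loop_hint`. A typical use of the type, for reference:

```rust
use crossbeam_utils::Backoff;
use std::sync::atomic::{AtomicBool, Ordering};

// Spin-wait for a flag, backing off exponentially; with the "std" feature,
// snooze() eventually yields the thread instead of burning CPU.
fn wait_for(flag: &AtomicBool) {
    let backoff = Backoff::new();
    while !flag.load(Ordering::Acquire) {
        backoff.snooze();
    }
}

fn main() {
    let flag = AtomicBool::new(true);
    wait_for(&flag); // already set, so this returns immediately
}
```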
diff --git a/src/cache_padded.rs b/src/cache_padded.rs
index b5d5d33..f44f2d7 100644
--- a/src/cache_padded.rs
+++ b/src/cache_padded.rs
@@ -14,7 +14,8 @@
 /// Cache lines are assumed to be N bytes long, depending on the architecture:
 ///
 /// * On x86-64, aarch64, and powerpc64, N = 128.
-/// * On arm, mips, mips64, and riscv64, N = 32.
+/// * On arm, mips, mips64, sparc, and hexagon, N = 32.
+/// * On m68k, N = 16.
 /// * On s390x, N = 256.
 /// * On all others, N = 64.
 ///
@@ -75,6 +76,7 @@
 //
 // Sources:
 // - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_ppc64x.go#L9
+// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/powerpc/include/asm/cache.h#L26
 #[cfg_attr(
     any(
         target_arch = "x86_64",
@@ -83,33 +85,45 @@
     ),
     repr(align(128))
 )]
-// arm, mips, mips64, and riscv64 have 32-byte cache line size.
+// arm, mips, mips64, sparc, and hexagon have 32-byte cache line size.
 //
 // Sources:
 // - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_arm.go#L7
 // - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips.go#L7
 // - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mipsle.go#L7
 // - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips64x.go#L9
-// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_riscv64.go#L7
+// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/sparc/include/asm/cache.h#L17
+// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/hexagon/include/asm/cache.h#L12
 #[cfg_attr(
     any(
         target_arch = "arm",
         target_arch = "mips",
+        target_arch = "mips32r6",
         target_arch = "mips64",
-        target_arch = "riscv64",
+        target_arch = "mips64r6",
+        target_arch = "sparc",
+        target_arch = "hexagon",
     ),
     repr(align(32))
 )]
+// m68k has 16-byte cache line size.
+//
+// Sources:
+// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/m68k/include/asm/cache.h#L9
+#[cfg_attr(target_arch = "m68k", repr(align(16)))]
 // s390x has 256-byte cache line size.
 //
 // Sources:
 // - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_s390x.go#L7
+// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/s390/include/asm/cache.h#L13
 #[cfg_attr(target_arch = "s390x", repr(align(256)))]
-// x86 and wasm have 64-byte cache line size.
+// x86, wasm, riscv, and sparc64 have 64-byte cache line size.
 //
 // Sources:
 // - https://github.com/golang/go/blob/dda2991c2ea0c5914714469c4defc2562a907230/src/internal/cpu/cpu_x86.go#L9
 // - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_wasm.go#L7
+// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/riscv/include/asm/cache.h#L10
+// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/sparc/include/asm/cache.h#L19
 //
 // All others are assumed to have 64-byte cache line size.
 #[cfg_attr(
@@ -119,8 +133,12 @@
         target_arch = "powerpc64",
         target_arch = "arm",
         target_arch = "mips",
+        target_arch = "mips32r6",
         target_arch = "mips64",
-        target_arch = "riscv64",
+        target_arch = "mips64r6",
+        target_arch = "sparc",
+        target_arch = "hexagon",
+        target_arch = "m68k",
         target_arch = "s390x",
     )),
     repr(align(64))
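
For context, the use case all of these alignment tables serve; a minimal false-sharing sketch:

```rust
use crossbeam_utils::CachePadded;
use std::sync::atomic::{AtomicUsize, Ordering};

// Counters touched by different threads: padding each to its own cache line
// keeps one thread's writes from invalidating the other's line.
struct Counters {
    hits: CachePadded<AtomicUsize>,
    misses: CachePadded<AtomicUsize>,
}

fn main() {
    let c = Counters {
        hits: CachePadded::new(AtomicUsize::new(0)),
        misses: CachePadded::new(AtomicUsize::new(0)),
    };
    c.hits.fetch_add(1, Ordering::Relaxed); // CachePadded derefs to the inner type
    assert_eq!(c.hits.load(Ordering::Relaxed), 1);
    // The smallest alignment in the table above is 16 (m68k):
    assert!(std::mem::align_of::<CachePadded<AtomicUsize>>() >= 16);
}
```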
diff --git a/src/lib.rs b/src/lib.rs
index 191c5a1..7206c1e 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -42,12 +42,14 @@
 #[cfg(crossbeam_loom)]
 #[allow(unused_imports)]
 mod primitive {
+    pub(crate) mod hint {
+        pub(crate) use loom::hint::spin_loop;
+    }
     pub(crate) mod sync {
         pub(crate) mod atomic {
-            pub(crate) use loom::sync::atomic::spin_loop_hint;
             pub(crate) use loom::sync::atomic::{
                 AtomicBool, AtomicI16, AtomicI32, AtomicI64, AtomicI8, AtomicIsize, AtomicU16,
-                AtomicU32, AtomicU64, AtomicU8, AtomicUsize,
+                AtomicU32, AtomicU64, AtomicU8, AtomicUsize, Ordering,
             };
 
             // FIXME: loom does not support compiler_fence at the moment.
@@ -63,19 +65,24 @@
 #[cfg(not(crossbeam_loom))]
 #[allow(unused_imports)]
 mod primitive {
+    pub(crate) mod hint {
+        pub(crate) use core::hint::spin_loop;
+    }
     pub(crate) mod sync {
         pub(crate) mod atomic {
-            pub(crate) use core::sync::atomic::compiler_fence;
-            // TODO(taiki-e): once we bump the minimum required Rust version to 1.49+,
-            // use [`core::hint::spin_loop`] instead.
-            #[allow(deprecated)]
-            pub(crate) use core::sync::atomic::spin_loop_hint;
+            pub(crate) use core::sync::atomic::{compiler_fence, Ordering};
             #[cfg(not(crossbeam_no_atomic))]
             pub(crate) use core::sync::atomic::{
-                AtomicBool, AtomicI16, AtomicI32, AtomicI8, AtomicIsize, AtomicU16, AtomicU32,
-                AtomicU8, AtomicUsize,
+                AtomicBool, AtomicI16, AtomicI8, AtomicIsize, AtomicU16, AtomicU8, AtomicUsize,
             };
-            #[cfg(not(crossbeam_no_atomic_64))]
+            #[cfg(not(crossbeam_no_atomic))]
+            #[cfg(any(target_has_atomic = "32", not(target_pointer_width = "16")))]
+            pub(crate) use core::sync::atomic::{AtomicI32, AtomicU32};
+            #[cfg(not(crossbeam_no_atomic))]
+            #[cfg(any(
+                target_has_atomic = "64",
+                not(any(target_pointer_width = "16", target_pointer_width = "32")),
+            ))]
             pub(crate) use core::sync::atomic::{AtomicI64, AtomicU64};
         }
 
@@ -92,13 +99,9 @@
 mod backoff;
 pub use crate::backoff::Backoff;
 
-use cfg_if::cfg_if;
+#[cfg(feature = "std")]
+pub mod sync;
 
-cfg_if! {
-    if #[cfg(feature = "std")] {
-        pub mod sync;
-
-        #[cfg(not(crossbeam_loom))]
-        pub mod thread;
-    }
-}
+#[cfg(feature = "std")]
+#[cfg(not(crossbeam_loom))]
+pub mod thread;
diff --git a/src/sync/once_lock.rs b/src/sync/once_lock.rs
index c1fefc9..e057aca 100644
--- a/src/sync/once_lock.rs
+++ b/src/sync/once_lock.rs
@@ -4,13 +4,10 @@
 
 use core::cell::UnsafeCell;
 use core::mem::MaybeUninit;
-use core::sync::atomic::{AtomicBool, Ordering};
 use std::sync::Once;
 
 pub(crate) struct OnceLock<T> {
     once: Once,
-    // Once::is_completed requires Rust 1.43, so use this to track of whether they have been initialized.
-    is_initialized: AtomicBool,
     value: UnsafeCell<MaybeUninit<T>>,
     // Unlike std::sync::OnceLock, we don't need PhantomData here because
     // we don't use #[may_dangle].
@@ -25,7 +22,6 @@
     pub(crate) const fn new() -> Self {
         Self {
             once: Once::new(),
-            is_initialized: AtomicBool::new(false),
             value: UnsafeCell::new(MaybeUninit::uninit()),
         }
     }
@@ -50,37 +46,26 @@
         F: FnOnce() -> T,
     {
         // Fast path check
-        if self.is_initialized() {
+        if self.once.is_completed() {
             // SAFETY: The inner value has been initialized
             return unsafe { self.get_unchecked() };
         }
         self.initialize(f);
 
-        debug_assert!(self.is_initialized());
-
         // SAFETY: The inner value has been initialized
         unsafe { self.get_unchecked() }
     }
 
-    #[inline]
-    fn is_initialized(&self) -> bool {
-        self.is_initialized.load(Ordering::Acquire)
-    }
-
     #[cold]
     fn initialize<F>(&self, f: F)
     where
         F: FnOnce() -> T,
     {
-        let slot = self.value.get().cast::<T>();
-        let is_initialized = &self.is_initialized;
+        let slot = self.value.get();
 
         self.once.call_once(|| {
             let value = f();
-            unsafe {
-                slot.write(value);
-            }
-            is_initialized.store(true, Ordering::Release);
+            unsafe { slot.write(MaybeUninit::new(value)) }
         });
     }
 
@@ -88,16 +73,16 @@
     ///
     /// The value must be initialized
     unsafe fn get_unchecked(&self) -> &T {
-        debug_assert!(self.is_initialized());
+        debug_assert!(self.once.is_completed());
         &*self.value.get().cast::<T>()
     }
 }
 
 impl<T> Drop for OnceLock<T> {
     fn drop(&mut self) {
-        if self.is_initialized() {
+        if self.once.is_completed() {
             // SAFETY: The inner value has been initialized
-            unsafe { self.value.get().cast::<T>().drop_in_place() };
+            unsafe { (*self.value.get()).assume_init_drop() };
         }
     }
 }
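
This simplification works because `Once::is_completed` (stable since Rust 1.43, hence usable at the new MSRV) already observes completion with acquire semantics, so the hand-rolled `AtomicBool` duplicated state that `Once` tracks anyway. A condensed sketch of the resulting pattern; the `Drop` impl from the real code is omitted:

```rust
use std::cell::UnsafeCell;
use std::mem::MaybeUninit;
use std::sync::Once;

// Condensed sketch of the OnceLock pattern after this change: `Once` alone
// tracks and synchronizes completion.
struct OnceLock<T> {
    once: Once,
    value: UnsafeCell<MaybeUninit<T>>,
}

unsafe impl<T: Send + Sync> Sync for OnceLock<T> {}

impl<T> OnceLock<T> {
    const fn new() -> Self {
        Self {
            once: Once::new(),
            value: UnsafeCell::new(MaybeUninit::uninit()),
        }
    }

    fn get_or_init(&self, f: impl FnOnce() -> T) -> &T {
        self.once.call_once(|| {
            // SAFETY: call_once guarantees this closure runs at most once.
            unsafe { (*self.value.get()).write(f()) };
        });
        // SAFETY: call_once has returned, so the value is initialized
        // (and once.is_completed() would now report true).
        unsafe { (*self.value.get()).assume_init_ref() }
    }
}

fn main() {
    static CELL: OnceLock<String> = OnceLock::new();
    assert_eq!(CELL.get_or_init(|| "initialized".to_string()), "initialized");
}
```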
diff --git a/src/sync/parker.rs b/src/sync/parker.rs
index 9cb3a26..971981d 100644
--- a/src/sync/parker.rs
+++ b/src/sync/parker.rs
@@ -1,6 +1,5 @@
-use crate::primitive::sync::atomic::AtomicUsize;
+use crate::primitive::sync::atomic::{AtomicUsize, Ordering::SeqCst};
 use crate::primitive::sync::{Arc, Condvar, Mutex};
-use core::sync::atomic::Ordering::SeqCst;
 use std::fmt;
 use std::marker::PhantomData;
 use std::time::{Duration, Instant};
diff --git a/src/sync/sharded_lock.rs b/src/sync/sharded_lock.rs
index a8f4584..5aee56f 100644
--- a/src/sync/sharded_lock.rs
+++ b/src/sync/sharded_lock.rs
@@ -356,7 +356,7 @@
             for shard in self.shards[0..i].iter().rev() {
                 unsafe {
                     let dest: *mut _ = shard.write_guard.get();
-                    let guard = mem::replace(&mut *dest, None);
+                    let guard = (*dest).take();
                     drop(guard);
                 }
             }
@@ -526,7 +526,7 @@
         for shard in self.lock.shards.iter().rev() {
             unsafe {
                 let dest: *mut _ = shard.write_guard.get();
-                let guard = mem::replace(&mut *dest, None);
+                let guard = (*dest).take();
                 drop(guard);
             }
         }
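
A cosmetic change: `Option::take` is the idiomatic spelling of `mem::replace(&mut opt, None)`; both move the value out and leave `None` behind:

```rust
use std::mem;

fn main() {
    let mut a = Some("guard");
    let mut b = Some("guard");
    let new_style = a.take();                   // what the code uses now
    let old_style = mem::replace(&mut b, None); // what it used before
    assert_eq!(new_style, old_style);
    assert!(a.is_none() && b.is_none());
}
```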
diff --git a/src/thread.rs b/src/thread.rs
index 7446454..b2e063a 100644
--- a/src/thread.rs
+++ b/src/thread.rs
@@ -84,7 +84,7 @@
 //! tricky because argument `s` lives *inside* the invocation of `thread::scope()` and as such
 //! cannot be borrowed by scoped threads:
 //!
-//! ```compile_fail,E0373,E0521
+//! ```compile_fail,E0521
 //! use crossbeam_utils::thread;
 //!
 //! thread::scope(|s| {
@@ -120,7 +120,6 @@
 use std::thread;
 
 use crate::sync::WaitGroup;
-use cfg_if::cfg_if;
 
 type SharedVec<T> = Arc<Mutex<Vec<T>>>;
 type SharedOption<T> = Arc<Mutex<Option<T>>>;
@@ -152,6 +151,15 @@
 where
     F: FnOnce(&Scope<'env>) -> R,
 {
+    struct AbortOnPanic;
+    impl Drop for AbortOnPanic {
+        fn drop(&mut self) {
+            if thread::panicking() {
+                std::process::abort();
+            }
+        }
+    }
+
     let wg = WaitGroup::new();
     let scope = Scope::<'env> {
         handles: SharedVec::default(),
@@ -162,6 +170,10 @@
     // Execute the scoped function, but catch any panics.
     let result = panic::catch_unwind(panic::AssertUnwindSafe(|| f(&scope)));
 
+    // If an unwinding panic occurs before all threads are joined,
+    // promote it to an aborting panic to prevent any threads from escaping the scope.
+    let guard = AbortOnPanic;
+
     // Wait until all nested scopes are dropped.
     drop(scope.wait_group);
     wg.wait();
@@ -177,6 +189,8 @@
         .filter_map(|handle| handle.join().err())
         .collect();
 
+    mem::forget(guard);
+
     // If `f` has panicked, resume unwinding.
     // If any of the child threads have panicked, return the panic errors.
     // Otherwise, everything is OK and return the result of `f`.
@@ -547,37 +561,42 @@
     }
 }
 
-cfg_if! {
-    if #[cfg(unix)] {
-        use std::os::unix::thread::{JoinHandleExt, RawPthread};
+/// Unix-specific extensions.
+#[cfg(unix)]
+mod unix {
+    use super::ScopedJoinHandle;
+    use std::os::unix::thread::{JoinHandleExt, RawPthread};
 
-        impl<T> JoinHandleExt for ScopedJoinHandle<'_, T> {
-            fn as_pthread_t(&self) -> RawPthread {
-                // Borrow the handle. The handle will surely be available because the root scope waits
-                // for nested scopes before joining remaining threads.
-                let handle = self.handle.lock().unwrap();
-                handle.as_ref().unwrap().as_pthread_t()
-            }
-            fn into_pthread_t(self) -> RawPthread {
-                self.as_pthread_t()
-            }
+    impl<T> JoinHandleExt for ScopedJoinHandle<'_, T> {
+        fn as_pthread_t(&self) -> RawPthread {
+            // Borrow the handle. The handle will surely be available because the root scope waits
+            // for nested scopes before joining remaining threads.
+            let handle = self.handle.lock().unwrap();
+            handle.as_ref().unwrap().as_pthread_t()
         }
-    } else if #[cfg(windows)] {
-        use std::os::windows::io::{AsRawHandle, IntoRawHandle, RawHandle};
-
-        impl<T> AsRawHandle for ScopedJoinHandle<'_, T> {
-            fn as_raw_handle(&self) -> RawHandle {
-                // Borrow the handle. The handle will surely be available because the root scope waits
-                // for nested scopes before joining remaining threads.
-                let handle = self.handle.lock().unwrap();
-                handle.as_ref().unwrap().as_raw_handle()
-            }
+        fn into_pthread_t(self) -> RawPthread {
+            self.as_pthread_t()
         }
+    }
+}
+/// Windows-specific extensions.
+#[cfg(windows)]
+mod windows {
+    use super::ScopedJoinHandle;
+    use std::os::windows::io::{AsRawHandle, IntoRawHandle, RawHandle};
 
-        impl<T> IntoRawHandle for ScopedJoinHandle<'_, T> {
-            fn into_raw_handle(self) -> RawHandle {
-                self.as_raw_handle()
-            }
+    impl<T> AsRawHandle for ScopedJoinHandle<'_, T> {
+        fn as_raw_handle(&self) -> RawHandle {
+            // Borrow the handle. The handle will surely be available because the root scope waits
+            // for nested scopes before joining remaining threads.
+            let handle = self.handle.lock().unwrap();
+            handle.as_ref().unwrap().as_raw_handle()
+        }
+    }
+
+    impl<T> IntoRawHandle for ScopedJoinHandle<'_, T> {
+        fn into_raw_handle(self) -> RawHandle {
+            self.as_raw_handle()
         }
     }
 }
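
The `AbortOnPanic` guard added above is a drop-guard idiom: if unwinding starts while the guard is live, its destructor runs during the unwind, sees `thread::panicking()`, and aborts; `mem::forget` disarms it on the success path. A standalone sketch:

```rust
use std::{mem, thread};

struct AbortOnPanic;
impl Drop for AbortOnPanic {
    fn drop(&mut self) {
        if thread::panicking() {
            // Reached only while unwinding: escalate to an abort.
            std::process::abort();
        }
    }
}

fn critical_section() {
    let guard = AbortOnPanic;
    // ... work that must not be escaped by an unwinding panic ...
    mem::forget(guard); // success: disarm the guard without running Drop
}

fn main() {
    critical_section();
    println!("completed without panicking");
}
```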
diff --git a/tests/atomic_cell.rs b/tests/atomic_cell.rs
index edb7a4b..9fe6932 100644
--- a/tests/atomic_cell.rs
+++ b/tests/atomic_cell.rs
@@ -6,11 +6,11 @@
 
 #[test]
 fn is_lock_free() {
-    struct UsizeWrap(usize);
-    struct U8Wrap(bool);
-    struct I16Wrap(i16);
+    struct UsizeWrap(#[allow(dead_code)] usize);
+    struct U8Wrap(#[allow(dead_code)] bool);
+    struct I16Wrap(#[allow(dead_code)] i16);
     #[repr(align(8))]
-    struct U64Align8(u64);
+    struct U64Align8(#[allow(dead_code)] u64);
 
     assert!(AtomicCell::<usize>::is_lock_free());
     assert!(AtomicCell::<isize>::is_lock_free());
@@ -35,13 +35,13 @@
     // of `AtomicU64` is `8`, so `AtomicCell<u64>` is not lock-free.
     assert_eq!(
         AtomicCell::<u64>::is_lock_free(),
-        cfg!(not(crossbeam_no_atomic_64)) && std::mem::align_of::<u64>() == 8
+        cfg!(target_has_atomic = "64") && std::mem::align_of::<u64>() == 8
     );
     assert_eq!(mem::size_of::<U64Align8>(), 8);
     assert_eq!(mem::align_of::<U64Align8>(), 8);
     assert_eq!(
         AtomicCell::<U64Align8>::is_lock_free(),
-        cfg!(not(crossbeam_no_atomic_64))
+        cfg!(target_has_atomic = "64")
     );
 
     // AtomicU128 is unstable
@@ -307,7 +307,6 @@
 
 // https://github.com/crossbeam-rs/crossbeam/issues/748
 #[cfg_attr(miri, ignore)] // TODO
-#[rustversion::since(1.37)] // #[repr(align(N))] requires Rust 1.37
 #[test]
 fn issue_748() {
     #[allow(dead_code)]
@@ -321,14 +320,13 @@
     assert_eq!(mem::size_of::<Test>(), 8);
     assert_eq!(
         AtomicCell::<Test>::is_lock_free(),
-        cfg!(not(crossbeam_no_atomic_64))
+        cfg!(target_has_atomic = "64")
     );
     let x = AtomicCell::new(Test::FieldLess);
     assert_eq!(x.load(), Test::FieldLess);
 }
 
 // https://github.com/crossbeam-rs/crossbeam/issues/833
-#[rustversion::since(1.40)] // const_constructor requires Rust 1.40
 #[test]
 fn issue_833() {
     use std::num::NonZeroU128;
diff --git a/tests/wait_group.rs b/tests/wait_group.rs
index 0ec4a72..5b549b8 100644
--- a/tests/wait_group.rs
+++ b/tests/wait_group.rs
@@ -36,25 +36,27 @@
 }
 
 #[test]
-#[cfg_attr(miri, ignore)] // this test makes timing assumptions, but Miri is so slow it violates them
 fn wait_and_drop() {
     let wg = WaitGroup::new();
+    let wg2 = WaitGroup::new();
     let (tx, rx) = mpsc::channel();
 
     for _ in 0..THREADS {
         let wg = wg.clone();
+        let wg2 = wg2.clone();
         let tx = tx.clone();
 
         thread::spawn(move || {
-            thread::sleep(Duration::from_millis(100));
+            wg2.wait();
             tx.send(()).unwrap();
             drop(wg);
         });
     }
 
-    // At this point, all spawned threads should be in `thread::sleep`, so we shouldn't get anything
-    // from the channel.
+    // At this point, no thread has gotten past `wg2.wait()`, so we shouldn't get anything from the
+    // channel.
     assert!(rx.try_recv().is_err());
+    drop(wg2);
 
     wg.wait();
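
The rewritten test replaces a 100 ms sleep with a second `WaitGroup` used as a start barrier, so the assertion no longer depends on timing: workers block in `wg2.wait()` until the main thread drops its own `wg2` clone. A condensed sketch of that barrier pattern:

```rust
use crossbeam_utils::sync::WaitGroup;
use std::thread;

fn main() {
    let start = WaitGroup::new(); // dropped by main to release all workers
    let done = WaitGroup::new();  // dropped by workers; main waits on it

    for i in 0..4 {
        let start = start.clone();
        let done = done.clone();
        thread::spawn(move || {
            start.wait(); // blocks until every other clone (incl. main's) is gone
            println!("worker {} released", i);
            drop(done);
        });
    }

    drop(start); // release the workers deterministically, no sleeps involved
    done.wait(); // then wait for all of them to finish
}
```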