core: make atomic primitives type aliases of Atomic<T>

joboet
2026-02-23 14:06:49 +01:00
parent 58745ca3b0
commit fa66fef1d1
+142 -112
@@ -248,40 +248,60 @@
use crate::cell::UnsafeCell;
use crate::hint::spin_loop;
use crate::intrinsics::AtomicOrdering as AO;
use crate::mem::transmute;
use crate::{fmt, intrinsics};
trait Sealed {}
#[unstable(
feature = "atomic_internals",
reason = "implementation detail which may disappear or be replaced at any time",
issue = "none"
)]
#[expect(missing_debug_implementations)]
mod private {
pub(super) trait Sealed {}
#[cfg(target_has_atomic_load_store = "8")]
#[repr(C, align(1))]
pub struct Align1<T>(T);
#[cfg(target_has_atomic_load_store = "16")]
#[repr(C, align(2))]
pub struct Align2<T>(T);
#[cfg(target_has_atomic_load_store = "32")]
#[repr(C, align(4))]
pub struct Align4<T>(T);
#[cfg(target_has_atomic_load_store = "64")]
#[repr(C, align(8))]
pub struct Align8<T>(T);
#[cfg(target_has_atomic_load_store = "128")]
#[repr(C, align(16))]
pub struct Align16<T>(T);
}
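
To see the storage trick in isolation: the `AlignN` wrappers keep a payload's bytes but force the alignment that same-sized atomic instructions require. A minimal self-contained sketch (`DemoAlign4` is a hypothetical stand-in for `private::Align4`; not part of this diff):

// Standalone sketch of the alignment-raising wrapper pattern above.
use std::mem::{align_of, size_of};

#[repr(C, align(4))]
#[allow(dead_code)]
struct DemoAlign4<T>(T);

fn main() {
    // Raising alignment does not change the size here, because the size of
    // the wrapped primitive is already a multiple of the target alignment.
    assert_eq!(size_of::<DemoAlign4<i32>>(), size_of::<i32>());
    // The wrapper guarantees the alignment that 32-bit atomic instructions
    // need, even for payloads that would otherwise be 1-aligned.
    assert_eq!(align_of::<[u8; 4]>(), 1);
    assert_eq!(align_of::<DemoAlign4<[u8; 4]>>(), 4);
}
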
/// A marker trait for primitive types which can be modified atomically.
///
/// This is an implementation detail for <code>[Atomic]\<T></code> which may disappear or be replaced at any time.
///
/// # Safety
///
/// Types implementing this trait must be primitives that can be modified atomically.
///
/// The associated `Self::AtomicInner` type must have the same size and bit validity as `Self`,
/// but may have a higher alignment requirement, so the following `transmute`s are sound:
///
/// - `&mut Self::AtomicInner` as `&mut Self`
/// - `Self` as `Self::AtomicInner` or the reverse
//
// # Safety
//
// Types implementing this trait must be primitives that can be modified atomically.
//
// The associated `Self::Storage` type must have the same size, but may have fewer validity
// invariants or a higher alignment requirement than `Self`.
#[unstable(
feature = "atomic_internals",
reason = "implementation detail which may disappear or be replaced at any time",
issue = "none"
)]
#[expect(private_bounds)]
pub unsafe trait AtomicPrimitive: Sized + Copy + Sealed {
pub unsafe trait AtomicPrimitive: Sized + Copy + private::Sealed {
/// Temporary implementation detail.
type AtomicInner: Sized;
type Storage: Sized;
}
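
The "fewer validity invariants" clause only needs to hold in one direction, as the `bool` case illustrates (a trivial sketch, not part of the diff):

// Every `bool` is a valid `u8` (0 or 1), so storing a `bool` in `u8`-shaped
// storage is always sound. Reading the storage back as `bool` instead relies
// on every atomic operation writing only 0 or 1 -- note the `val as u8`
// casts in the `AtomicBool` methods further down.
fn main() {
    assert_eq!(false as u8, 0);
    assert_eq!(true as u8, 1);
}
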
macro impl_atomic_primitive(
$Atom:ident $(<$T:ident>)? ($Primitive:ty),
size($size:literal),
align($align:literal) $(,)?
[$($T:ident)?] $Primitive:ty as $Storage:ident<$Operand:ty>, size($size:literal)
) {
impl $(<$T>)? Sealed for $Primitive {}
impl $(<$T>)? private::Sealed for $Primitive {}
#[unstable(
feature = "atomic_internals",
@@ -290,42 +310,42 @@ impl $(<$T>)? Sealed for $Primitive {}
)]
#[cfg(target_has_atomic_load_store = $size)]
unsafe impl $(<$T>)? AtomicPrimitive for $Primitive {
type AtomicInner = $Atom $(<$T>)?;
type Storage = private::$Storage<$Operand>;
}
}
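
For reference, a self-contained model of what one invocation of this macro produces (demo names; the stability attributes and `cfg` gates are elided, so this is an inferred sketch rather than the literal expansion):

#[allow(dead_code)]
mod demo {
    // Stand-ins for the items in `private` above.
    trait Sealed {}

    #[repr(C, align(1))]
    struct Align1<T>(T);

    unsafe trait AtomicPrimitive: Sized + Copy + Sealed {
        type Storage: Sized;
    }

    // Roughly what `impl_atomic_primitive!([] bool as Align1<u8>, size("8"))`
    // expands to, minus the attributes and the
    // `#[cfg(target_has_atomic_load_store = "8")]` gate:
    impl Sealed for bool {}
    unsafe impl AtomicPrimitive for bool {
        type Storage = Align1<u8>;
    }
}

fn main() {}
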
impl_atomic_primitive!(AtomicBool(bool), size("8"), align(1));
impl_atomic_primitive!(AtomicI8(i8), size("8"), align(1));
impl_atomic_primitive!(AtomicU8(u8), size("8"), align(1));
impl_atomic_primitive!(AtomicI16(i16), size("16"), align(2));
impl_atomic_primitive!(AtomicU16(u16), size("16"), align(2));
impl_atomic_primitive!(AtomicI32(i32), size("32"), align(4));
impl_atomic_primitive!(AtomicU32(u32), size("32"), align(4));
impl_atomic_primitive!(AtomicI64(i64), size("64"), align(8));
impl_atomic_primitive!(AtomicU64(u64), size("64"), align(8));
impl_atomic_primitive!(AtomicI128(i128), size("128"), align(16));
impl_atomic_primitive!(AtomicU128(u128), size("128"), align(16));
impl_atomic_primitive!([] bool as Align1<u8>, size("8"));
impl_atomic_primitive!([] i8 as Align1<i8>, size("8"));
impl_atomic_primitive!([] u8 as Align1<u8>, size("8"));
impl_atomic_primitive!([] i16 as Align2<i16>, size("16"));
impl_atomic_primitive!([] u16 as Align2<u16>, size("16"));
impl_atomic_primitive!([] i32 as Align4<i32>, size("32"));
impl_atomic_primitive!([] u32 as Align4<u32>, size("32"));
impl_atomic_primitive!([] i64 as Align8<i64>, size("64"));
impl_atomic_primitive!([] u64 as Align8<u64>, size("64"));
impl_atomic_primitive!([] i128 as Align16<i128>, size("128"));
impl_atomic_primitive!([] u128 as Align16<u128>, size("128"));
#[cfg(target_pointer_width = "16")]
impl_atomic_primitive!(AtomicIsize(isize), size("ptr"), align(2));
impl_atomic_primitive!([] isize as Align2<isize>, size("ptr"));
#[cfg(target_pointer_width = "32")]
impl_atomic_primitive!(AtomicIsize(isize), size("ptr"), align(4));
impl_atomic_primitive!([] isize as Align4<isize>, size("ptr"));
#[cfg(target_pointer_width = "64")]
impl_atomic_primitive!(AtomicIsize(isize), size("ptr"), align(8));
impl_atomic_primitive!([] isize as Align8<isize>, size("ptr"));
#[cfg(target_pointer_width = "16")]
impl_atomic_primitive!(AtomicUsize(usize), size("ptr"), align(2));
impl_atomic_primitive!([] usize as Align2<usize>, size("ptr"));
#[cfg(target_pointer_width = "32")]
impl_atomic_primitive!(AtomicUsize(usize), size("ptr"), align(4));
impl_atomic_primitive!([] usize as Align4<usize>, size("ptr"));
#[cfg(target_pointer_width = "64")]
impl_atomic_primitive!(AtomicUsize(usize), size("ptr"), align(8));
impl_atomic_primitive!([] usize as Align8<usize>, size("ptr"));
#[cfg(target_pointer_width = "16")]
impl_atomic_primitive!(AtomicPtr<T>(*mut T), size("ptr"), align(2));
impl_atomic_primitive!([T] *mut T as Align2<*mut T>, size("ptr"));
#[cfg(target_pointer_width = "32")]
impl_atomic_primitive!(AtomicPtr<T>(*mut T), size("ptr"), align(4));
impl_atomic_primitive!([T] *mut T as Align4<*mut T>, size("ptr"));
#[cfg(target_pointer_width = "64")]
impl_atomic_primitive!(AtomicPtr<T>(*mut T), size("ptr"), align(8));
impl_atomic_primitive!([T] *mut T as Align8<*mut T>, size("ptr"));
/// A memory location which can be safely modified from multiple threads.
///
@@ -342,7 +362,15 @@ unsafe impl $(<$T>)? AtomicPrimitive for $Primitive {
///
/// [module-level documentation]: crate::sync::atomic
#[unstable(feature = "generic_atomic", issue = "130539")]
pub type Atomic<T> = <T as AtomicPrimitive>::AtomicInner;
#[repr(C)]
pub struct Atomic<T: AtomicPrimitive> {
v: UnsafeCell<T::Storage>,
}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: AtomicPrimitive> Send for Atomic<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: AtomicPrimitive> Sync for Atomic<T> {}
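
One consequence worth spelling out: with the aliases below, `Atomic<T>` and the stable named types become the same type, not merely convertible. A usage sketch, assuming a nightly toolchain with the `generic_atomic` feature:

#![feature(generic_atomic)]

use std::sync::atomic::{Atomic, AtomicU32, Ordering};

// `Atomic<u32>` and `AtomicU32` are the same type by definition, so either
// name works wherever the other does.
static COUNTER: Atomic<u32> = AtomicU32::new(0);

fn bump(counter: &AtomicU32) -> u32 {
    counter.fetch_add(1, Ordering::Relaxed)
}

fn main() {
    bump(&COUNTER);
    assert_eq!(COUNTER.load(Ordering::Relaxed), 1);
}
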
// Some architectures don't have byte-sized atomics, which results in LLVM
// emulating them using a LL/SC loop. However for AtomicBool we can take
@@ -368,10 +396,7 @@ unsafe impl $(<$T>)? AtomicPrimitive for $Primitive {
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_diagnostic_item = "AtomicBool"]
#[repr(C, align(1))]
pub struct AtomicBool {
v: UnsafeCell<u8>,
}
pub type AtomicBool = Atomic<bool>;
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
@@ -383,11 +408,6 @@ fn default() -> Self {
}
}
// Send is implicitly implemented for AtomicBool.
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl Sync for AtomicBool {}
/// A raw pointer type which can be safely shared between threads.
///
/// This type has the same size and bit validity as a `*mut T`.
@@ -397,12 +417,7 @@ unsafe impl Sync for AtomicBool {}
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_diagnostic_item = "AtomicPtr"]
#[cfg_attr(target_pointer_width = "16", repr(C, align(2)))]
#[cfg_attr(target_pointer_width = "32", repr(C, align(4)))]
#[cfg_attr(target_pointer_width = "64", repr(C, align(8)))]
pub struct AtomicPtr<T> {
p: UnsafeCell<*mut T>,
}
pub type AtomicPtr<T> = Atomic<*mut T>;
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
@@ -413,13 +428,6 @@ fn default() -> AtomicPtr<T> {
}
}
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Send for AtomicPtr<T> {}
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Sync for AtomicPtr<T> {}
/// Atomic memory orderings
///
/// Memory orderings specify the way atomic operations synchronize memory.
@@ -528,7 +536,9 @@ impl AtomicBool {
#[rustc_const_stable(feature = "const_atomic_new", since = "1.24.0")]
#[must_use]
pub const fn new(v: bool) -> AtomicBool {
AtomicBool { v: UnsafeCell::new(v as u8) }
// SAFETY:
// `Atomic<T>` is essentially a transparent wrapper around `T`.
unsafe { transmute(v) }
}
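
The layout facts this `transmute` leans on can be written down as compile-time checks (illustrative only; the standard library does not literally contain these asserts):

use std::mem::{align_of, size_of};

// By-value `transmute` requires only equal sizes, which `transmute` itself
// checks at compile time; the pointer-based accessors additionally need the
// storage to be at least as aligned as the primitive. Both hold trivially
// for `bool` stored as a 1-aligned `u8`.
const _: () = assert!(size_of::<bool>() == size_of::<u8>());
const _: () = assert!(align_of::<u8>() >= align_of::<bool>());

fn main() {}
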
/// Creates a new `AtomicBool` from a pointer.
@@ -597,7 +607,7 @@ pub const fn new(v: bool) -> AtomicBool {
#[stable(feature = "atomic_access", since = "1.15.0")]
pub fn get_mut(&mut self) -> &mut bool {
// SAFETY: the mutable reference guarantees unique ownership.
unsafe { &mut *(self.v.get() as *mut bool) }
unsafe { &mut *self.as_ptr() }
}
/// Gets atomic access to a `&mut bool`.
@@ -699,7 +709,11 @@ pub fn from_mut_slice(v: &mut [bool]) -> &mut [Self] {
#[stable(feature = "atomic_access", since = "1.15.0")]
#[rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0")]
pub const fn into_inner(self) -> bool {
self.v.into_inner() != 0
// SAFETY:
// * `Atomic<T>` is essentially a transparent wrapper around `T`.
// * all operations on `Atomic<bool>` ensure that `T::Storage` remains
// a valid `bool`.
unsafe { transmute(self) }
}
/// Loads a value from the bool.
@@ -726,7 +740,7 @@ pub const fn into_inner(self) -> bool {
pub fn load(&self, order: Ordering) -> bool {
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe { atomic_load(self.v.get(), order) != 0 }
unsafe { atomic_load(self.v.get().cast::<u8>(), order) != 0 }
}
/// Stores a value into the bool.
@@ -756,7 +770,7 @@ pub fn store(&self, val: bool, order: Ordering) {
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe {
atomic_store(self.v.get(), val as u8, order);
atomic_store(self.v.get().cast::<u8>(), val as u8, order);
}
}
@@ -790,7 +804,7 @@ pub fn swap(&self, val: bool, order: Ordering) -> bool {
if val { self.fetch_or(true, order) } else { self.fetch_and(false, order) }
} else {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
unsafe { atomic_swap(self.v.get().cast::<u8>(), val as u8, order) != 0 }
}
}
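
The fast path above rests on a boolean identity; a plain, non-atomic check of it:

// For booleans, OR with `true` always stores `true` and AND with `false`
// always stores `false`, so `fetch_or(true, ..)` and `fetch_and(false, ..)`
// each behave exactly like `swap` with that value while still returning the
// previous one -- avoiding an LL/SC loop on targets without byte-sized swaps.
fn main() {
    for old in [false, true] {
        assert_eq!(old | true, true);
        assert_eq!(old & false, false);
    }
}
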
@@ -950,7 +964,13 @@ pub fn compare_exchange(
} else {
// SAFETY: data races are prevented by atomic intrinsics.
match unsafe {
atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
atomic_compare_exchange(
self.v.get().cast::<u8>(),
current as u8,
new as u8,
success,
failure,
)
} {
Ok(x) => Ok(x != 0),
Err(x) => Err(x != 0),
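
For orientation, a small stable-Rust usage sketch of this method (not part of the diff): claiming a one-shot flag exactly once.

use std::sync::atomic::{AtomicBool, Ordering};

static STARTED: AtomicBool = AtomicBool::new(false);

// Returns `true` for exactly one caller, even under contention.
fn try_start() -> bool {
    STARTED
        .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
        .is_ok()
}

fn main() {
    assert!(try_start());
    assert!(!try_start());
}
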
@@ -1024,7 +1044,13 @@ pub fn compare_exchange_weak(
// SAFETY: data races are prevented by atomic intrinsics.
match unsafe {
atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
atomic_compare_exchange_weak(
self.v.get().cast::<u8>(),
current as u8,
new as u8,
success,
failure,
)
} {
Ok(x) => Ok(x != 0),
Err(x) => Err(x != 0),
@@ -1070,7 +1096,7 @@ pub fn compare_exchange_weak(
#[rustc_should_not_be_called_on_const_items]
pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
unsafe { atomic_and(self.v.get().cast::<u8>(), val as u8, order) != 0 }
}
/// Logical "nand" with a boolean value.
@@ -1166,7 +1192,7 @@ pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
#[rustc_should_not_be_called_on_const_items]
pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
unsafe { atomic_or(self.v.get().cast::<u8>(), val as u8, order) != 0 }
}
/// Logical "xor" with a boolean value.
@@ -1208,7 +1234,7 @@ pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
#[rustc_should_not_be_called_on_const_items]
pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
unsafe { atomic_xor(self.v.get().cast::<u8>(), val as u8, order) != 0 }
}
/// Logical "not" with a boolean value.
@@ -1457,7 +1483,9 @@ impl<T> AtomicPtr<T> {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_atomic_new", since = "1.24.0")]
pub const fn new(p: *mut T) -> AtomicPtr<T> {
AtomicPtr { p: UnsafeCell::new(p) }
// SAFETY:
// `Atomic<T>` is essentially a transparent wrapper around `T`.
unsafe { transmute(p) }
}
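
A brief usage sketch for `AtomicPtr` (stable Rust, not from the diff): publishing and retiring a heap allocation through pointer swaps.

use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering};

fn main() {
    let slot: AtomicPtr<i32> = AtomicPtr::new(ptr::null_mut());

    // Publish a heap value by swapping its raw pointer in.
    let old = slot.swap(Box::into_raw(Box::new(42)), Ordering::AcqRel);
    assert!(old.is_null());

    // Retire it again; the swap guarantees we are the sole owner.
    let taken = slot.swap(ptr::null_mut(), Ordering::AcqRel);
    // SAFETY: `taken` came from `Box::into_raw` above and was removed from
    // the slot atomically, so no one else can free it.
    unsafe { drop(Box::from_raw(taken)) };
}
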
/// Creates a new `AtomicPtr` from a pointer.
@@ -1544,7 +1572,9 @@ pub const fn null() -> AtomicPtr<T> {
#[inline]
#[stable(feature = "atomic_access", since = "1.15.0")]
pub fn get_mut(&mut self) -> &mut *mut T {
self.p.get_mut()
// SAFETY:
// `Atomic<T>` is essentially a transparent wrapper around `T`.
unsafe { &mut *self.as_ptr() }
}
/// Gets atomic access to a pointer.
@@ -1672,7 +1702,9 @@ pub fn from_mut_slice(v: &mut [*mut T]) -> &mut [Self] {
#[stable(feature = "atomic_access", since = "1.15.0")]
#[rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0")]
pub const fn into_inner(self) -> *mut T {
self.p.into_inner()
// SAFETY:
// `Atomic<T>` is essentially a transparent wrapper around `T`.
unsafe { transmute(self) }
}
/// Loads a value from the pointer.
@@ -1699,7 +1731,7 @@ pub const fn into_inner(self) -> *mut T {
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn load(&self, order: Ordering) -> *mut T {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_load(self.p.get(), order) }
unsafe { atomic_load(self.as_ptr(), order) }
}
/// Stores a value into the pointer.
@@ -1730,7 +1762,7 @@ pub fn load(&self, order: Ordering) -> *mut T {
pub fn store(&self, ptr: *mut T, order: Ordering) {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe {
atomic_store(self.p.get(), ptr, order);
atomic_store(self.as_ptr(), ptr, order);
}
}
@@ -1763,7 +1795,7 @@ pub fn store(&self, ptr: *mut T, order: Ordering) {
#[rustc_should_not_be_called_on_const_items]
pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_swap(self.p.get(), ptr, order) }
unsafe { atomic_swap(self.as_ptr(), ptr, order) }
}
/// Stores a value into the pointer if the current value is the same as the `current` value.
@@ -1887,7 +1919,7 @@ pub fn compare_exchange(
failure: Ordering,
) -> Result<*mut T, *mut T> {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_compare_exchange(self.p.get(), current, new, success, failure) }
unsafe { atomic_compare_exchange(self.as_ptr(), current, new, success, failure) }
}
/// Stores a value into the pointer if the current value is the same as the `current` value.
@@ -1954,7 +1986,7 @@ pub fn compare_exchange_weak(
// but we know for sure that the pointer is valid (we just got it from
// an `UnsafeCell` that we have by reference) and the atomic operation
// itself allows us to safely mutate the `UnsafeCell` contents.
unsafe { atomic_compare_exchange_weak(self.p.get(), current, new, success, failure) }
unsafe { atomic_compare_exchange_weak(self.as_ptr(), current, new, success, failure) }
}
/// An alias for [`AtomicPtr::try_update`].
@@ -2243,7 +2275,7 @@ pub fn fetch_ptr_sub(&self, val: usize, order: Ordering) -> *mut T {
#[rustc_should_not_be_called_on_const_items]
pub fn fetch_byte_add(&self, val: usize, order: Ordering) -> *mut T {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_add(self.p.get(), val, order).cast() }
unsafe { atomic_add(self.as_ptr(), val, order).cast() }
}
/// Offsets the pointer's address by subtracting `val` *bytes*, returning the
@@ -2279,7 +2311,7 @@ pub fn fetch_byte_add(&self, val: usize, order: Ordering) -> *mut T {
#[rustc_should_not_be_called_on_const_items]
pub fn fetch_byte_sub(&self, val: usize, order: Ordering) -> *mut T {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_sub(self.p.get(), val, order).cast() }
unsafe { atomic_sub(self.as_ptr(), val, order).cast() }
}
/// Performs a bitwise "or" operation on the address of the current pointer,
@@ -2330,7 +2362,7 @@ pub fn fetch_byte_sub(&self, val: usize, order: Ordering) -> *mut T {
#[rustc_should_not_be_called_on_const_items]
pub fn fetch_or(&self, val: usize, order: Ordering) -> *mut T {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_or(self.p.get(), val, order).cast() }
unsafe { atomic_or(self.as_ptr(), val, order).cast() }
}
/// Performs a bitwise "and" operation on the address of the current
@@ -2380,7 +2412,7 @@ pub fn fetch_or(&self, val: usize, order: Ordering) -> *mut T {
#[rustc_should_not_be_called_on_const_items]
pub fn fetch_and(&self, val: usize, order: Ordering) -> *mut T {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_and(self.p.get(), val, order).cast() }
unsafe { atomic_and(self.as_ptr(), val, order).cast() }
}
/// Performs a bitwise "xor" operation on the address of the current
@@ -2428,7 +2460,7 @@ pub fn fetch_and(&self, val: usize, order: Ordering) -> *mut T {
#[rustc_should_not_be_called_on_const_items]
pub fn fetch_xor(&self, val: usize, order: Ordering) -> *mut T {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_xor(self.p.get(), val, order).cast() }
unsafe { atomic_xor(self.as_ptr(), val, order).cast() }
}
/// Returns a mutable pointer to the underlying pointer.
@@ -2467,7 +2499,7 @@ pub fn fetch_xor(&self, val: usize, order: Ordering) -> *mut T {
#[rustc_const_stable(feature = "atomic_as_ptr", since = "1.70.0")]
#[rustc_never_returns_null_ptr]
pub const fn as_ptr(&self) -> *mut *mut T {
self.p.get()
self.v.get().cast()
}
}
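
The `.cast()` from `*mut Self::Storage` to `*mut *mut T` relies on the wrapper having the pointer's exact size and at least its alignment. A sketch of those facts for the 64-bit arm (`DemoAlign8` is a hypothetical stand-in for `private::Align8`):

use std::mem::{align_of, size_of};

#[repr(C, align(8))]
#[allow(dead_code)]
struct DemoAlign8<T>(T);

fn main() {
    // On a 64-bit target (the `target_pointer_width = "64"` arm above), the
    // wrapper adds no padding and only pins down the alignment, so a pointer
    // to the wrapper is a valid pointer to the wrapped `*mut T`.
    assert_eq!(size_of::<DemoAlign8<*mut i32>>(), size_of::<*mut i32>());
    assert!(align_of::<DemoAlign8<*mut i32>>() >= align_of::<*mut i32>());
}
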
@@ -2558,10 +2590,7 @@ macro_rules! atomic_int {
/// [module-level documentation]: crate::sync::atomic
#[$stable]
#[$diagnostic_item]
#[repr(C, align($align))]
pub struct $atomic_type {
v: UnsafeCell<$int_type>,
}
pub type $atomic_type = Atomic<$int_type>;
#[$stable]
impl Default for $atomic_type {
@@ -2586,10 +2615,6 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
}
}
// Send is implicitly implemented.
#[$stable]
unsafe impl Sync for $atomic_type {}
impl $atomic_type {
/// Creates a new atomic integer.
///
@@ -2605,7 +2630,9 @@ impl $atomic_type {
#[$const_stable_new]
#[must_use]
pub const fn new(v: $int_type) -> Self {
Self {v: UnsafeCell::new(v)}
// SAFETY:
// `Atomic<T>` is essentially a transparent wrapper around `T`.
unsafe { transmute(v) }
}
/// Creates a new reference to an atomic integer from a pointer.
@@ -2667,7 +2694,6 @@ pub const fn new(v: $int_type) -> Self {
unsafe { &*ptr.cast() }
}
/// Returns a mutable reference to the underlying integer.
///
/// This is safe because the mutable reference guarantees that no other threads are
@@ -2686,7 +2712,9 @@ pub const fn new(v: $int_type) -> Self {
#[inline]
#[$stable_access]
pub fn get_mut(&mut self) -> &mut $int_type {
self.v.get_mut()
// SAFETY:
// `Atomic<T>` is essentially a transparent wrapper around `T`.
unsafe { &mut *self.as_ptr() }
}
#[doc = concat!("Get atomic access to a `&mut ", stringify!($int_type), "`.")]
@@ -2815,7 +2843,9 @@ pub fn from_mut_slice(v: &mut [$int_type]) -> &mut [Self] {
#[$stable_access]
#[$const_stable_into_inner]
pub const fn into_inner(self) -> $int_type {
self.v.into_inner()
// SAFETY:
// `Atomic<T>` is essentially a transparent wrapper around `T`.
unsafe { transmute(self) }
}
/// Loads a value from the atomic integer.
@@ -2841,7 +2871,7 @@ pub const fn into_inner(self) -> $int_type {
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn load(&self, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_load(self.v.get(), order) }
unsafe { atomic_load(self.as_ptr(), order) }
}
/// Stores a value into the atomic integer.
@@ -2869,7 +2899,7 @@ pub fn load(&self, order: Ordering) -> $int_type {
#[rustc_should_not_be_called_on_const_items]
pub fn store(&self, val: $int_type, order: Ordering) {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_store(self.v.get(), val, order); }
unsafe { atomic_store(self.as_ptr(), val, order); }
}
/// Stores a value into the atomic integer, returning the previous value.
@@ -2898,7 +2928,7 @@ pub fn store(&self, val: $int_type, order: Ordering) {
#[rustc_should_not_be_called_on_const_items]
pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_swap(self.v.get(), val, order) }
unsafe { atomic_swap(self.as_ptr(), val, order) }
}
/// Stores a value into the atomic integer if the current value is the same as
@@ -3036,7 +3066,7 @@ pub fn compare_exchange(&self,
success: Ordering,
failure: Ordering) -> Result<$int_type, $int_type> {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
unsafe { atomic_compare_exchange(self.as_ptr(), current, new, success, failure) }
}
/// Stores a value into the atomic integer if the current value is the same as
@@ -3101,7 +3131,7 @@ pub fn compare_exchange_weak(&self,
failure: Ordering) -> Result<$int_type, $int_type> {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe {
atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
atomic_compare_exchange_weak(self.as_ptr(), current, new, success, failure)
}
}
@@ -3133,7 +3163,7 @@ pub fn compare_exchange_weak(&self,
#[rustc_should_not_be_called_on_const_items]
pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_add(self.v.get(), val, order) }
unsafe { atomic_add(self.as_ptr(), val, order) }
}
/// Subtracts from the current value, returning the previous value.
@@ -3164,7 +3194,7 @@ pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
#[rustc_should_not_be_called_on_const_items]
pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_sub(self.v.get(), val, order) }
unsafe { atomic_sub(self.as_ptr(), val, order) }
}
/// Bitwise "and" with the current value.
@@ -3198,7 +3228,7 @@ pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
#[rustc_should_not_be_called_on_const_items]
pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_and(self.v.get(), val, order) }
unsafe { atomic_and(self.as_ptr(), val, order) }
}
/// Bitwise "nand" with the current value.
@@ -3232,7 +3262,7 @@ pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
#[rustc_should_not_be_called_on_const_items]
pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_nand(self.v.get(), val, order) }
unsafe { atomic_nand(self.as_ptr(), val, order) }
}
/// Bitwise "or" with the current value.
@@ -3266,7 +3296,7 @@ pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
#[rustc_should_not_be_called_on_const_items]
pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_or(self.v.get(), val, order) }
unsafe { atomic_or(self.as_ptr(), val, order) }
}
/// Bitwise "xor" with the current value.
@@ -3300,7 +3330,7 @@ pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
#[rustc_should_not_be_called_on_const_items]
pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_xor(self.v.get(), val, order) }
unsafe { atomic_xor(self.as_ptr(), val, order) }
}
/// An alias for
@@ -3499,7 +3529,7 @@ pub fn update(
#[rustc_should_not_be_called_on_const_items]
pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { $max_fn(self.v.get(), val, order) }
unsafe { $max_fn(self.as_ptr(), val, order) }
}
/// Minimum with the current value.
@@ -3546,7 +3576,7 @@ pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
#[rustc_should_not_be_called_on_const_items]
pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { $min_fn(self.v.get(), val, order) }
unsafe { $min_fn(self.as_ptr(), val, order) }
}
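
Finally, a usage sketch for the generated `fetch_max` (stable Rust, not from the diff): a lock-free running maximum across threads.

use std::sync::atomic::{AtomicU64, Ordering};
use std::thread;

fn main() {
    let max = AtomicU64::new(0);
    let max = &max;
    thread::scope(|s| {
        for chunk in [[3u64, 9, 1], [7, 2, 8]] {
            s.spawn(move || {
                for v in chunk {
                    // Atomically replaces the value with `max(current, v)`.
                    max.fetch_max(v, Ordering::Relaxed);
                }
            });
        }
    });
    assert_eq!(max.load(Ordering::Relaxed), 9);
}
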
/// Returns a mutable pointer to the underlying integer.
@@ -3586,7 +3616,7 @@ pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
#[rustc_const_stable(feature = "atomic_as_ptr", since = "1.70.0")]
#[rustc_never_returns_null_ptr]
pub const fn as_ptr(&self) -> *mut $int_type {
self.v.get()
self.v.get().cast()
}
}
}