use core::borrow::{Borrow, BorrowMut};
use core::cmp;
use core::fmt;
use core::marker::PhantomData;
use core::mem::{self, MaybeUninit};
use core::ops::{Deref, DerefMut};
use core::slice;
use core::sync::atomic::Ordering;

use crate::alloc::alloc;
use crate::alloc::boxed::Box;
use crate::guard::Guard;
use crate::primitive::sync::atomic::AtomicUsize;
use crossbeam_utils::atomic::AtomicConsume;

/// Given an ordering for the success case in a compare-exchange operation, returns the strongest
/// appropriate ordering for the failure case.
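///
/// For example, `Release` maps to `Relaxed` and `AcqRel` maps to `Acquire`,
/// since the failure case performs only a load and cannot use a store ordering.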
#[inline]
fn strongest_failure_ordering(ord: Ordering) -> Ordering {
    use self::Ordering::*;
    match ord {
        Relaxed | Release => Relaxed,
        Acquire | AcqRel => Acquire,
        _ => SeqCst,
    }
}

/// The error returned on a failed compare-and-set operation.
// TODO: remove in the next major version.
#[deprecated(note = "Use `CompareExchangeError` instead")]
pub type CompareAndSetError<'g, T, P> = CompareExchangeError<'g, T, P>;

/// The error returned on a failed compare-exchange operation.
pub struct CompareExchangeError<'g, T: ?Sized + Pointable, P: Pointer<T>> {
    /// The value in the atomic pointer at the time of the failed operation.
    pub current: Shared<'g, T>,

    /// The new value, which the operation failed to store.
    pub new: P,
}

impl<T, P: Pointer<T> + fmt::Debug> fmt::Debug for CompareExchangeError<'_, T, P> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("CompareExchangeError")
            .field("current", &self.current)
            .field("new", &self.new)
            .finish()
    }
}

/// Memory orderings for compare-and-set operations.
///
/// A compare-and-set operation can have different memory orderings depending on whether it
/// succeeds or fails. This trait generalizes different ways of specifying memory orderings.
///
/// The two ways of specifying orderings for compare-and-set are:
///
/// 1. Just one `Ordering` for the success case. In case of failure, the strongest appropriate
///    ordering is chosen.
/// 2. A pair of `Ordering`s. The first one is for the success case, while the second one is
///    for the failure case.
// TODO: remove in the next major version.
#[deprecated(
    note = "`compare_and_set` and `compare_and_set_weak` that use this trait are deprecated, \
            use `compare_exchange` or `compare_exchange_weak` instead"
)]
pub trait CompareAndSetOrdering {
    /// The ordering of the operation when it succeeds.
    fn success(&self) -> Ordering;

    /// The ordering of the operation when it fails.
    ///
    /// The failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than
    /// the success ordering.
    fn failure(&self) -> Ordering;
}

#[allow(deprecated)]
impl CompareAndSetOrdering for Ordering {
    #[inline]
    fn success(&self) -> Ordering {
        *self
    }

    #[inline]
    fn failure(&self) -> Ordering {
        strongest_failure_ordering(*self)
    }
}

#[allow(deprecated)]
impl CompareAndSetOrdering for (Ordering, Ordering) {
    #[inline]
    fn success(&self) -> Ordering {
        self.0
    }

    #[inline]
    fn failure(&self) -> Ordering {
        self.1
    }
}

/// Returns a bitmask containing the unused least significant bits of an aligned pointer to `T`.
#[inline]
fn low_bits<T: ?Sized + Pointable>() -> usize {
    (1 << T::ALIGN.trailing_zeros()) - 1
}

/// Panics if the pointer is not properly aligned.
#[inline]
fn ensure_aligned<T: ?Sized + Pointable>(raw: usize) {
    assert_eq!(raw & low_bits::<T>(), 0, "unaligned pointer");
}

/// Given a tagged pointer `data`, returns the same pointer, but tagged with `tag`.
///
/// `tag` is truncated to fit into the unused bits of the pointer to `T`.
#[inline]
fn compose_tag<T: ?Sized + Pointable>(data: usize, tag: usize) -> usize {
    (data & !low_bits::<T>()) | (tag & low_bits::<T>())
}

/// Decomposes a tagged pointer `data` into the pointer and the tag.
#[inline]
fn decompose_tag<T: ?Sized + Pointable>(data: usize) -> (usize, usize) {
    (data & !low_bits::<T>(), data & low_bits::<T>())
}
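// Example (added for illustration): for a type with alignment 8, `low_bits`
// is 0b111, so `compose_tag::<u64>(0x1000, 0b101)` yields 0x1005 and
// `decompose_tag::<u64>(0x1005)` yields `(0x1000, 0b101)`, assuming
// `align_of::<u64>() == 8` on the target platform.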

/// Types that are pointed to by a single word.
///
/// In concurrent programming, it is necessary to represent an object within a word because atomic
/// operations (e.g., reads, writes, read-modify-writes) support only single words. This trait
/// qualifies such types that are pointed to by a single word.
///
/// The trait generalizes `Box<T>` for a sized type `T`. In a box, an object of type `T` is
/// allocated on the heap and owned by a single-word pointer. This trait is also implemented for
/// `[MaybeUninit<T>]` by storing its size along with its elements and pointing to the pair of array
/// size and elements.
///
/// Pointers to `Pointable` types can be stored in [`Atomic`], [`Owned`], and [`Shared`]. In
/// particular, Crossbeam supports dynamically sized slices as follows.
///
/// ```
/// use std::mem::MaybeUninit;
/// use crossbeam_epoch::Owned;
///
/// let o = Owned::<[MaybeUninit<i32>]>::init(10); // allocating [i32; 10]
/// ```
pub trait Pointable {
    /// The alignment of the pointer.
    const ALIGN: usize;

    /// The type for initializers.
    type Init;

    /// Initializes a new object with the given initializer and returns its address.
    ///
    /// # Safety
    ///
    /// The result should be a multiple of `ALIGN`.
    unsafe fn init(init: Self::Init) -> usize;

    /// Dereferences the given pointer.
    ///
    /// # Safety
    ///
    /// - The given `ptr` should have been initialized with [`Pointable::init`].
    /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
    /// - `ptr` should not be mutably dereferenced by [`Pointable::deref_mut`] concurrently.
    unsafe fn deref<'a>(ptr: usize) -> &'a Self;

    /// Mutably dereferences the given pointer.
    ///
    /// # Safety
    ///
    /// - The given `ptr` should have been initialized with [`Pointable::init`].
    /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
    /// - `ptr` should not be dereferenced by [`Pointable::deref`] or [`Pointable::deref_mut`]
    ///   concurrently.
    unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self;

    /// Drops the object pointed to by the given pointer.
    ///
    /// # Safety
    ///
    /// - The given `ptr` should have been initialized with [`Pointable::init`].
    /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
    /// - `ptr` should not be dereferenced by [`Pointable::deref`] or [`Pointable::deref_mut`]
    ///   concurrently.
    unsafe fn drop(ptr: usize);
}

impl<T> Pointable for T {
    const ALIGN: usize = mem::align_of::<T>();

    type Init = T;

    unsafe fn init(init: Self::Init) -> usize {
        Box::into_raw(Box::new(init)) as usize
    }

    unsafe fn deref<'a>(ptr: usize) -> &'a Self {
        &*(ptr as *const T)
    }

    unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self {
        &mut *(ptr as *mut T)
    }

    unsafe fn drop(ptr: usize) {
        drop(Box::from_raw(ptr as *mut T));
    }
}

/// Array with size.
///
/// # Memory layout
///
/// An array consisting of size and elements:
///
/// ```text
///          elements
///          |
///          |
/// ------------------------------------
/// | size | 0 | 1 | 2 | 3 | 4 | 5 | 6 |
/// ------------------------------------
/// ```
///
/// Its memory layout is different from that of `Box<[T]>` in that size is in the allocation (not
/// along with pointer as in `Box<[T]>`).
///
/// Elements are not present in the type, but they will be in the allocation.
// TODO(@jeehoonkang): once we bump the minimum required Rust version to 1.44 or newer, use
// [`alloc::alloc::Layout::extend`] instead.
#[repr(C)]
struct Array<T> {
    /// The number of elements (not the number of bytes).
    len: usize,
    elements: [MaybeUninit<T>; 0],
}
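// Illustration (added comment): for `T = u64` and `len == 4`, `init` below
// allocates `size_of::<Array<u64>>() + 4 * size_of::<MaybeUninit<u64>>()` bytes:
// one length word followed by four element slots, matching the diagram above.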

impl<T> Pointable for [MaybeUninit<T>] {
    const ALIGN: usize = mem::align_of::<Array<T>>();

    type Init = usize;

    unsafe fn init(len: Self::Init) -> usize {
        let size = mem::size_of::<Array<T>>() + mem::size_of::<MaybeUninit<T>>() * len;
        let align = mem::align_of::<Array<T>>();
        let layout = alloc::Layout::from_size_align(size, align).unwrap();
        let ptr = alloc::alloc(layout) as *mut Array<T>;
        if ptr.is_null() {
            alloc::handle_alloc_error(layout);
        }
        (*ptr).len = len;
        ptr as usize
    }

    unsafe fn deref<'a>(ptr: usize) -> &'a Self {
        let array = &*(ptr as *const Array<T>);
        slice::from_raw_parts(array.elements.as_ptr() as *const _, array.len)
    }

    unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self {
        let array = &*(ptr as *mut Array<T>);
        slice::from_raw_parts_mut(array.elements.as_ptr() as *mut _, array.len)
    }

    unsafe fn drop(ptr: usize) {
        let array = &*(ptr as *mut Array<T>);
        let size = mem::size_of::<Array<T>>() + mem::size_of::<MaybeUninit<T>>() * array.len;
        let align = mem::align_of::<Array<T>>();
        let layout = alloc::Layout::from_size_align(size, align).unwrap();
        alloc::dealloc(ptr as *mut u8, layout);
    }
}

/// An atomic pointer that can be safely shared between threads.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address. For example, the tag for a pointer to a sized type `T`
/// should be less than `(1 << mem::align_of::<T>().trailing_zeros())`.
///
/// Any method that loads the pointer must be passed a reference to a [`Guard`].
///
/// Crossbeam supports dynamically sized types. See [`Pointable`] for details.
pub struct Atomic<T: ?Sized + Pointable> {
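    /// The tagged pointer: the object's address with the tag packed into its
    /// unused least significant bits (see `compose_tag` and `decompose_tag`).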
    data: AtomicUsize,
    _marker: PhantomData<*mut T>,
}

unsafe impl<T: ?Sized + Pointable + Send + Sync> Send for Atomic<T> {}
unsafe impl<T: ?Sized + Pointable + Send + Sync> Sync for Atomic<T> {}

impl<T> Atomic<T> {
    /// Allocates `init` on the heap and returns a new atomic pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::new(1234);
    /// ```
    pub fn new(init: T) -> Atomic<T> {
        Self::init(init)
    }
}

impl<T: ?Sized + Pointable> Atomic<T> {
    /// Allocates `init` on the heap and returns a new atomic pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::<i32>::init(1234);
    /// ```
    pub fn init(init: T::Init) -> Atomic<T> {
        Self::from(Owned::init(init))
    }

    /// Returns a new atomic pointer pointing to the tagged pointer `data`.
    fn from_usize(data: usize) -> Self {
        Self {
            data: AtomicUsize::new(data),
            _marker: PhantomData,
        }
    }

    /// Returns a new null atomic pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::<i32>::null();
    /// ```
    #[cfg_attr(all(feature = "nightly", not(crossbeam_loom)), const_fn::const_fn)]
    pub fn null() -> Atomic<T> {
        Self {
            data: AtomicUsize::new(0),
            _marker: PhantomData,
        }
    }

    /// Loads a `Shared` from the atomic pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// ```
    pub fn load<'g>(&self, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.load(ord)) }
    }

    /// Loads a `Shared` from the atomic pointer using a "consume" memory ordering.
    ///
    /// This is similar to the "acquire" ordering, except that an ordering is
    /// only guaranteed with operations that "depend on" the result of the load.
    /// However, consume loads are usually much faster than acquire loads on
    /// architectures with a weak memory model since they don't require memory
    /// fence instructions.
    ///
    /// The exact definition of "depend on" is a bit vague, but it works as you
    /// would expect in practice since a lot of software, especially the Linux
    /// kernel, relies on this behavior.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load_consume(guard);
    /// ```
    pub fn load_consume<'g>(&self, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.load_consume()) }
    }

    /// Stores a `Shared` or `Owned` pointer into the atomic pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// a.store(Shared::null(), SeqCst);
    /// a.store(Owned::new(1234), SeqCst);
    /// ```
    pub fn store<P: Pointer<T>>(&self, new: P, ord: Ordering) {
        self.data.store(new.into_usize(), ord);
    }

    /// Stores a `Shared` or `Owned` pointer into the atomic pointer, returning the previous
    /// `Shared`.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.swap(Shared::null(), SeqCst, guard);
    /// ```
    pub fn swap<'g, P: Pointer<T>>(&self, new: P, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.swap(new.into_usize(), ord)) }
    }

    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
    /// same object, but with different tags, will not be considered equal.
    ///
    /// The return value is a result indicating whether the new pointer was written. On success the
    /// pointer that was written is returned. On failure the actual current value and `new` are
    /// returned.
    ///
    /// This method takes two `Ordering` arguments to describe the memory
    /// ordering of this operation. `success` describes the required ordering for the
    /// read-modify-write operation that takes place if the comparison with `current` succeeds.
    /// `failure` describes the required ordering for the load operation that takes place when
    /// the comparison fails. Using `Acquire` as success ordering makes the store part
    /// of this operation `Relaxed`, and using `Release` makes the successful load
    /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed`
    /// and must be equivalent to or weaker than the success ordering.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    ///
    /// let guard = &epoch::pin();
    /// let curr = a.load(SeqCst, guard);
    /// let res1 = a.compare_exchange(curr, Shared::null(), SeqCst, SeqCst, guard);
    /// let res2 = a.compare_exchange(curr, Owned::new(5678), SeqCst, SeqCst, guard);
    /// ```
    pub fn compare_exchange<'g, P>(
        &self,
        current: Shared<'_, T>,
        new: P,
        success: Ordering,
        failure: Ordering,
        _: &'g Guard,
    ) -> Result<Shared<'g, T>, CompareExchangeError<'g, T, P>>
    where
        P: Pointer<T>,
    {
        let new = new.into_usize();
        self.data
            .compare_exchange(current.into_usize(), new, success, failure)
            .map(|_| unsafe { Shared::from_usize(new) })
            .map_err(|current| unsafe {
                CompareExchangeError {
                    current: Shared::from_usize(current),
                    new: P::from_usize(new),
                }
            })
    }

    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
    /// same object, but with different tags, will not be considered equal.
    ///
    /// Unlike [`compare_exchange`], this method is allowed to spuriously fail even when comparison
    /// succeeds, which can result in more efficient code on some platforms. The return value is a
    /// result indicating whether the new pointer was written. On success the pointer that was
    /// written is returned. On failure the actual current value and `new` are returned.
    ///
    /// This method takes two `Ordering` arguments to describe the memory
    /// ordering of this operation. `success` describes the required ordering for the
    /// read-modify-write operation that takes place if the comparison with `current` succeeds.
    /// `failure` describes the required ordering for the load operation that takes place when
    /// the comparison fails. Using `Acquire` as success ordering makes the store part
    /// of this operation `Relaxed`, and using `Release` makes the successful load
    /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed`
    /// and must be equivalent to or weaker than the success ordering.
    ///
    /// [`compare_exchange`]: Atomic::compare_exchange
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    ///
    /// let mut new = Owned::new(5678);
    /// let mut ptr = a.load(SeqCst, guard);
    /// loop {
    ///     match a.compare_exchange_weak(ptr, new, SeqCst, SeqCst, guard) {
    ///         Ok(p) => {
    ///             ptr = p;
    ///             break;
    ///         }
    ///         Err(err) => {
    ///             ptr = err.current;
    ///             new = err.new;
    ///         }
    ///     }
    /// }
    ///
    /// let mut curr = a.load(SeqCst, guard);
    /// loop {
    ///     match a.compare_exchange_weak(curr, Shared::null(), SeqCst, SeqCst, guard) {
    ///         Ok(_) => break,
    ///         Err(err) => curr = err.current,
    ///     }
    /// }
    /// ```
    pub fn compare_exchange_weak<'g, P>(
        &self,
        current: Shared<'_, T>,
        new: P,
        success: Ordering,
        failure: Ordering,
        _: &'g Guard,
    ) -> Result<Shared<'g, T>, CompareExchangeError<'g, T, P>>
    where
        P: Pointer<T>,
    {
        let new = new.into_usize();
        self.data
            .compare_exchange_weak(current.into_usize(), new, success, failure)
            .map(|_| unsafe { Shared::from_usize(new) })
            .map_err(|current| unsafe {
                CompareExchangeError {
                    current: Shared::from_usize(current),
                    new: P::from_usize(new),
                }
            })
    }

    /// Fetches the pointer, and then applies a function to it that returns a new value.
    /// Returns a `Result` of `Ok(previous_value)` if the function returned `Some`, else `Err(_)`.
    ///
    /// Note that the given function may be called multiple times if the value has been changed by
    /// other threads in the meantime, as long as the function returns `Some(_)`, but the function
    /// will have been applied only once to the stored value.
    ///
    /// `fetch_update` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering for
    /// when the operation finally succeeds while the second describes the
    /// required ordering for loads. These correspond to the success and failure
    /// orderings of [`Atomic::compare_exchange`] respectively.
    ///
    /// Using [`Acquire`] as success ordering makes the store part of this
    /// operation [`Relaxed`], and using [`Release`] makes the final successful
    /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
    /// [`Acquire`] or [`Relaxed`] and must be equivalent to or weaker than the
    /// success ordering.
    ///
    /// [`Relaxed`]: Ordering::Relaxed
    /// [`Acquire`]: Ordering::Acquire
    /// [`Release`]: Ordering::Release
    /// [`SeqCst`]: Ordering::SeqCst
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    ///
    /// let res1 = a.fetch_update(SeqCst, SeqCst, guard, |x| Some(x.with_tag(1)));
    /// assert!(res1.is_ok());
    ///
    /// let res2 = a.fetch_update(SeqCst, SeqCst, guard, |x| None);
    /// assert!(res2.is_err());
    /// ```
    pub fn fetch_update<'g, F>(
        &self,
        set_order: Ordering,
        fail_order: Ordering,
        guard: &'g Guard,
        mut func: F,
    ) -> Result<Shared<'g, T>, Shared<'g, T>>
    where
        F: FnMut(Shared<'g, T>) -> Option<Shared<'g, T>>,
    {
        let mut prev = self.load(fail_order, guard);
        while let Some(next) = func(prev) {
            match self.compare_exchange_weak(prev, next, set_order, fail_order, guard) {
                Ok(shared) => return Ok(shared),
                Err(next_prev) => prev = next_prev.current,
            }
        }
        Err(prev)
    }

    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
    /// same object, but with different tags, will not be considered equal.
    ///
    /// The return value is a result indicating whether the new pointer was written. On success the
    /// pointer that was written is returned. On failure the actual current value and `new` are
    /// returned.
    ///
    /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
    /// ordering of this operation.
    ///
    /// # Migrating to `compare_exchange`
    ///
    /// `compare_and_set` is equivalent to `compare_exchange` with the following mapping for
    /// memory orderings:
    ///
    /// Original | Success | Failure
    /// -------- | ------- | -------
    /// Relaxed  | Relaxed | Relaxed
    /// Acquire  | Acquire | Acquire
    /// Release  | Release | Relaxed
    /// AcqRel   | AcqRel  | Acquire
    /// SeqCst   | SeqCst  | SeqCst
    ///
    /// # Examples
    ///
    /// ```
    /// # #![allow(deprecated)]
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    ///
    /// let guard = &epoch::pin();
    /// let curr = a.load(SeqCst, guard);
    /// let res1 = a.compare_and_set(curr, Shared::null(), SeqCst, guard);
    /// let res2 = a.compare_and_set(curr, Owned::new(5678), SeqCst, guard);
    /// ```
    // TODO: remove in the next major version.
    #[allow(deprecated)]
    #[deprecated(note = "Use `compare_exchange` instead")]
    pub fn compare_and_set<'g, O, P>(
        &self,
        current: Shared<'_, T>,
        new: P,
        ord: O,
        guard: &'g Guard,
    ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
    where
        O: CompareAndSetOrdering,
        P: Pointer<T>,
    {
        self.compare_exchange(current, new, ord.success(), ord.failure(), guard)
    }

    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
    /// same object, but with different tags, will not be considered equal.
    ///
    /// Unlike [`compare_and_set`], this method is allowed to spuriously fail even when comparison
    /// succeeds, which can result in more efficient code on some platforms. The return value is a
    /// result indicating whether the new pointer was written. On success the pointer that was
    /// written is returned. On failure the actual current value and `new` are returned.
    ///
    /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
    /// ordering of this operation.
    ///
    /// [`compare_and_set`]: Atomic::compare_and_set
    ///
    /// # Migrating to `compare_exchange_weak`
    ///
    /// `compare_and_set_weak` is equivalent to `compare_exchange_weak` with the following mapping
    /// for memory orderings:
    ///
    /// Original | Success | Failure
    /// -------- | ------- | -------
    /// Relaxed  | Relaxed | Relaxed
    /// Acquire  | Acquire | Acquire
    /// Release  | Release | Relaxed
    /// AcqRel   | AcqRel  | Acquire
    /// SeqCst   | SeqCst  | SeqCst
    ///
    /// # Examples
    ///
    /// ```
    /// # #![allow(deprecated)]
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    ///
    /// let mut new = Owned::new(5678);
    /// let mut ptr = a.load(SeqCst, guard);
    /// loop {
    ///     match a.compare_and_set_weak(ptr, new, SeqCst, guard) {
    ///         Ok(p) => {
    ///             ptr = p;
    ///             break;
    ///         }
    ///         Err(err) => {
    ///             ptr = err.current;
    ///             new = err.new;
    ///         }
    ///     }
    /// }
    ///
    /// let mut curr = a.load(SeqCst, guard);
    /// loop {
    ///     match a.compare_and_set_weak(curr, Shared::null(), SeqCst, guard) {
    ///         Ok(_) => break,
    ///         Err(err) => curr = err.current,
    ///     }
    /// }
    /// ```
    // TODO: remove in the next major version.
    #[allow(deprecated)]
    #[deprecated(note = "Use `compare_exchange_weak` instead")]
    pub fn compare_and_set_weak<'g, O, P>(
        &self,
        current: Shared<'_, T>,
        new: P,
        ord: O,
        guard: &'g Guard,
    ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
    where
        O: CompareAndSetOrdering,
        P: Pointer<T>,
    {
        self.compare_exchange_weak(current, new, ord.success(), ord.failure(), guard)
    }

    /// Bitwise "and" with the current tag.
    ///
    /// Performs a bitwise "and" operation on the current tag and the argument `val`, and sets the
    /// new tag to the result. Returns the previous pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(3));
    /// let guard = &epoch::pin();
    /// assert_eq!(a.fetch_and(2, SeqCst, guard).tag(), 3);
    /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
    /// ```
    pub fn fetch_and<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
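        // The mask sets every non-tag bit, so the "and" can only clear tag
        // bits and always leaves the address bits intact.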
        unsafe { Shared::from_usize(self.data.fetch_and(val | !low_bits::<T>(), ord)) }
    }

    /// Bitwise "or" with the current tag.
    ///
    /// Performs a bitwise "or" operation on the current tag and the argument `val`, and sets the
    /// new tag to the result. Returns the previous pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
    /// let guard = &epoch::pin();
    /// assert_eq!(a.fetch_or(2, SeqCst, guard).tag(), 1);
    /// assert_eq!(a.load(SeqCst, guard).tag(), 3);
    /// ```
    pub fn fetch_or<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.fetch_or(val & low_bits::<T>(), ord)) }
    }

    /// Bitwise "xor" with the current tag.
    ///
    /// Performs a bitwise "xor" operation on the current tag and the argument `val`, and sets the
    /// new tag to the result. Returns the previous pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
    /// let guard = &epoch::pin();
    /// assert_eq!(a.fetch_xor(3, SeqCst, guard).tag(), 1);
    /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
    /// ```
    pub fn fetch_xor<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.fetch_xor(val & low_bits::<T>(), ord)) }
    }

    /// Takes ownership of the pointee.
    ///
    /// This consumes the atomic and converts it into [`Owned`]. As [`Atomic`] doesn't have a
    /// destructor and doesn't drop the pointee while [`Owned`] does, this is suitable for
    /// destructors of data structures.
    ///
    /// # Panics
    ///
    /// Panics if this pointer is null, but only in debug mode.
    ///
    /// # Safety
    ///
    /// This method may be called only if the pointer is valid and nobody else is holding a
    /// reference to the same object.
    ///
    /// # Examples
    ///
    /// ```rust
    /// # use std::mem;
    /// # use crossbeam_epoch::Atomic;
    /// struct DataStructure {
    ///     ptr: Atomic<usize>,
    /// }
    ///
    /// impl Drop for DataStructure {
    ///     fn drop(&mut self) {
    ///         // By now the DataStructure lives only in our thread and we are sure we don't hold
    ///         // any Shared or & to it ourselves.
    ///         unsafe {
    ///             drop(mem::replace(&mut self.ptr, Atomic::null()).into_owned());
    ///         }
    ///     }
    /// }
    /// ```
    pub unsafe fn into_owned(self) -> Owned<T> {
        #[cfg(crossbeam_loom)]
        {
            // FIXME: loom does not yet support into_inner, so we use unsync_load for now,
            // which should have the same synchronization properties:
            // https://github.com/tokio-rs/loom/issues/117
            Owned::from_usize(self.data.unsync_load())
        }
        #[cfg(not(crossbeam_loom))]
        {
            Owned::from_usize(self.data.into_inner())
        }
    }
}

impl<T: ?Sized + Pointable> fmt::Debug for Atomic<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let data = self.data.load(Ordering::SeqCst);
        let (raw, tag) = decompose_tag::<T>(data);

        f.debug_struct("Atomic")
            .field("raw", &raw)
            .field("tag", &tag)
            .finish()
    }
}

impl<T: ?Sized + Pointable> fmt::Pointer for Atomic<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let data = self.data.load(Ordering::SeqCst);
        let (raw, _) = decompose_tag::<T>(data);
        fmt::Pointer::fmt(&(unsafe { T::deref(raw) as *const _ }), f)
    }
}

impl<T: ?Sized + Pointable> Clone for Atomic<T> {
    /// Returns a copy of the atomic value.
    ///
    /// Note that a `Relaxed` load is used here. If you need synchronization, use it with other
    /// atomics or fences.
    fn clone(&self) -> Self {
        let data = self.data.load(Ordering::Relaxed);
        Atomic::from_usize(data)
    }
}

impl<T: ?Sized + Pointable> Default for Atomic<T> {
    fn default() -> Self {
        Atomic::null()
    }
}

impl<T: ?Sized + Pointable> From<Owned<T>> for Atomic<T> {
    /// Returns a new atomic pointer pointing to `owned`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{Atomic, Owned};
    ///
    /// let a = Atomic::<i32>::from(Owned::new(1234));
    /// ```
    fn from(owned: Owned<T>) -> Self {
        let data = owned.data;
        mem::forget(owned);
        Self::from_usize(data)
    }
}

impl<T> From<Box<T>> for Atomic<T> {
    fn from(b: Box<T>) -> Self {
        Self::from(Owned::from(b))
    }
}

impl<T> From<T> for Atomic<T> {
    fn from(t: T) -> Self {
        Self::new(t)
    }
}

impl<'g, T: ?Sized + Pointable> From<Shared<'g, T>> for Atomic<T> {
    /// Returns a new atomic pointer pointing to `ptr`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{Atomic, Shared};
    ///
    /// let a = Atomic::<i32>::from(Shared::<i32>::null());
    /// ```
    fn from(ptr: Shared<'g, T>) -> Self {
        Self::from_usize(ptr.data)
    }
}

impl<T> From<*const T> for Atomic<T> {
    /// Returns a new atomic pointer pointing to `raw`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::ptr;
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::<i32>::from(ptr::null::<i32>());
    /// ```
    fn from(raw: *const T) -> Self {
        Self::from_usize(raw as usize)
    }
}

/// A trait for either `Owned` or `Shared` pointers.
pub trait Pointer<T: ?Sized + Pointable> {
    /// Returns the machine representation of the pointer.
    fn into_usize(self) -> usize;

    /// Returns a new pointer pointing to the tagged pointer `data`.
    ///
    /// # Safety
    ///
    /// The given `data` should have been created by `Pointer::into_usize()`, and one `data` should
    /// not be converted back by `Pointer::from_usize()` multiple times.
    unsafe fn from_usize(data: usize) -> Self;
}

/// An owned heap-allocated object.
///
/// This type is very similar to `Box<T>`.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address.
pub struct Owned<T: ?Sized + Pointable> {
    data: usize,
    _marker: PhantomData<Box<T>>,
}

impl<T: ?Sized + Pointable> Pointer<T> for Owned<T> {
    #[inline]
    fn into_usize(self) -> usize {
        let data = self.data;
        mem::forget(self);
        data
    }

    /// Returns a new pointer pointing to the tagged pointer `data`.
    ///
    /// # Panics
    ///
    /// Panics if the data is zero in debug mode.
    #[inline]
    unsafe fn from_usize(data: usize) -> Self {
        debug_assert!(data != 0, "converting zero into `Owned`");
        Owned {
            data,
            _marker: PhantomData,
        }
    }
}

impl<T> Owned<T> {
    /// Returns a new owned pointer pointing to `raw`.
    ///
    /// This function is unsafe because improper use may lead to memory problems. Argument `raw`
    /// must be a valid pointer. Also, a double-free may occur if the function is called twice on
    /// the same raw pointer.
    ///
    /// # Panics
    ///
    /// Panics if `raw` is not properly aligned.
    ///
    /// # Safety
    ///
    /// The given `raw` should have been derived from `Owned`, and one `raw` should not be converted
    /// back by `Owned::from_raw()` multiple times.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
    /// ```
    pub unsafe fn from_raw(raw: *mut T) -> Owned<T> {
        let raw = raw as usize;
        ensure_aligned::<T>(raw);
        Self::from_usize(raw)
    }

    /// Converts the owned pointer into a `Box`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::new(1234);
    /// let b: Box<i32> = o.into_box();
    /// assert_eq!(*b, 1234);
    /// ```
    pub fn into_box(self) -> Box<T> {
        let (raw, _) = decompose_tag::<T>(self.data);
        mem::forget(self);
        unsafe { Box::from_raw(raw as *mut _) }
    }

    /// Allocates `init` on the heap and returns a new owned pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::new(1234);
    /// ```
    pub fn new(init: T) -> Owned<T> {
        Self::init(init)
    }
}

impl<T: ?Sized + Pointable> Owned<T> {
    /// Allocates `init` on the heap and returns a new owned pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::<i32>::init(1234);
    /// ```
    pub fn init(init: T::Init) -> Owned<T> {
        unsafe { Self::from_usize(T::init(init)) }
    }

    /// Converts the owned pointer into a [`Shared`].
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Owned};
    ///
    /// let o = Owned::new(1234);
    /// let guard = &epoch::pin();
    /// let p = o.into_shared(guard);
    /// ```
    #[allow(clippy::needless_lifetimes)]
    pub fn into_shared<'g>(self, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.into_usize()) }
    }

    /// Returns the tag stored within the pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// assert_eq!(Owned::new(1234).tag(), 0);
    /// ```
    pub fn tag(&self) -> usize {
        let (_, tag) = decompose_tag::<T>(self.data);
        tag
    }

    /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to fit into the
    /// unused bits of the pointer to `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::new(0u64);
    /// assert_eq!(o.tag(), 0);
    /// let o = o.with_tag(2);
    /// assert_eq!(o.tag(), 2);
    /// ```
    pub fn with_tag(self, tag: usize) -> Owned<T> {
        let data = self.into_usize();
        unsafe { Self::from_usize(compose_tag::<T>(data, tag)) }
    }
}

impl<T: ?Sized + Pointable> Drop for Owned<T> {
    fn drop(&mut self) {
        let (raw, _) = decompose_tag::<T>(self.data);
        unsafe {
            T::drop(raw);
        }
    }
}

impl<T: ?Sized + Pointable> fmt::Debug for Owned<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (raw, tag) = decompose_tag::<T>(self.data);

        f.debug_struct("Owned")
            .field("raw", &raw)
            .field("tag", &tag)
            .finish()
    }
}

impl<T: Clone> Clone for Owned<T> {
    fn clone(&self) -> Self {
        Owned::new((**self).clone()).with_tag(self.tag())
    }
}

impl<T: ?Sized + Pointable> Deref for Owned<T> {
    type Target = T;

    fn deref(&self) -> &T {
        let (raw, _) = decompose_tag::<T>(self.data);
        unsafe { T::deref(raw) }
    }
}

impl<T: ?Sized + Pointable> DerefMut for Owned<T> {
    fn deref_mut(&mut self) -> &mut T {
        let (raw, _) = decompose_tag::<T>(self.data);
        unsafe { T::deref_mut(raw) }
    }
}

impl<T> From<T> for Owned<T> {
    fn from(t: T) -> Self {
        Owned::new(t)
    }
}

impl<T> From<Box<T>> for Owned<T> {
    /// Returns a new owned pointer pointing to `b`.
    ///
    /// # Panics
    ///
    /// Panics if the pointer (the `Box`) is not properly aligned.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
    /// ```
    fn from(b: Box<T>) -> Self {
        unsafe { Self::from_raw(Box::into_raw(b)) }
    }
}

impl<T: ?Sized + Pointable> Borrow<T> for Owned<T> {
    fn borrow(&self) -> &T {
        self.deref()
    }
}

impl<T: ?Sized + Pointable> BorrowMut<T> for Owned<T> {
    fn borrow_mut(&mut self) -> &mut T {
        self.deref_mut()
    }
}

impl<T: ?Sized + Pointable> AsRef<T> for Owned<T> {
    fn as_ref(&self) -> &T {
        self.deref()
    }
}

impl<T: ?Sized + Pointable> AsMut<T> for Owned<T> {
    fn as_mut(&mut self) -> &mut T {
        self.deref_mut()
    }
}

/// A pointer to an object protected by the epoch GC.
///
/// The pointer is valid for use only during the lifetime `'g`.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address.
pub struct Shared<'g, T: 'g + ?Sized + Pointable> {
    data: usize,
    _marker: PhantomData<(&'g (), *const T)>,
}

impl<T: ?Sized + Pointable> Clone for Shared<'_, T> {
    fn clone(&self) -> Self {
        Self {
            data: self.data,
            _marker: PhantomData,
        }
    }
}

impl<T: ?Sized + Pointable> Copy for Shared<'_, T> {}

impl<T: ?Sized + Pointable> Pointer<T> for Shared<'_, T> {
    #[inline]
    fn into_usize(self) -> usize {
        self.data
    }

    #[inline]
    unsafe fn from_usize(data: usize) -> Self {
        Shared {
            data,
            _marker: PhantomData,
        }
    }
}

impl<'g, T> Shared<'g, T> {
    /// Converts the pointer to a raw pointer (without the tag).
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let o = Owned::new(1234);
    /// let raw = &*o as *const _;
    /// let a = Atomic::from(o);
    ///
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// assert_eq!(p.as_raw(), raw);
    /// ```
    pub fn as_raw(&self) -> *const T {
        let (raw, _) = decompose_tag::<T>(self.data);
        raw as *const _
    }
}

impl<'g, T: ?Sized + Pointable> Shared<'g, T> {
    /// Returns a new null pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Shared;
    ///
    /// let p = Shared::<i32>::null();
    /// assert!(p.is_null());
    /// ```
    pub fn null() -> Shared<'g, T> {
        Shared {
            data: 0,
            _marker: PhantomData,
        }
    }

    /// Returns `true` if the pointer is null.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::null();
    /// let guard = &epoch::pin();
    /// assert!(a.load(SeqCst, guard).is_null());
    /// a.store(Owned::new(1234), SeqCst);
    /// assert!(!a.load(SeqCst, guard).is_null());
    /// ```
    pub fn is_null(&self) -> bool {
        let (raw, _) = decompose_tag::<T>(self.data);
        raw == 0
    }

    /// Dereferences the pointer.
    ///
    /// Returns a reference to the pointee that is valid during the lifetime `'g`.
    ///
    /// # Safety
    ///
    /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
    ///
    /// Another concern is the possibility of data races due to lack of proper synchronization.
    /// For example, consider the following scenario:
    ///
    /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
    /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
    ///
    /// The problem is that relaxed orderings don't synchronize initialization of the object with
    /// the read from the second thread. This is a data race. A possible solution would be to use
    /// `Release` and `Acquire` orderings.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert_eq!(p.deref(), &1234);
    /// }
    /// ```
    pub unsafe fn deref(&self) -> &'g T {
        let (raw, _) = decompose_tag::<T>(self.data);
        T::deref(raw)
    }

    /// Dereferences the pointer.
    ///
    /// Returns a mutable reference to the pointee that is valid during the lifetime `'g`.
    ///
    /// # Safety
    ///
    /// * There is no guarantee that there are no more threads attempting to read/write from/to the
    ///   actual object at the same time.
    ///
    ///   The user must know that there are no concurrent accesses towards the object itself.
    ///
    /// * Other than the above, all safety concerns of `deref()` apply here.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(vec![1, 2, 3, 4]);
    /// let guard = &epoch::pin();
    ///
    /// let mut p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert!(!p.is_null());
    ///     let b = p.deref_mut();
    ///     assert_eq!(b, &vec![1, 2, 3, 4]);
    ///     b.push(5);
    ///     assert_eq!(b, &vec![1, 2, 3, 4, 5]);
    /// }
    ///
    /// let p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert_eq!(p.deref(), &vec![1, 2, 3, 4, 5]);
    /// }
    /// ```
    pub unsafe fn deref_mut(&mut self) -> &'g mut T {
        let (raw, _) = decompose_tag::<T>(self.data);
        T::deref_mut(raw)
    }

    /// Converts the pointer to a reference.
    ///
    /// Returns `None` if the pointer is null, or else a reference to the object wrapped in `Some`.
    ///
    /// # Safety
    ///
    /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
    ///
    /// Another concern is the possibility of data races due to lack of proper synchronization.
    /// For example, consider the following scenario:
    ///
    /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
    /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
    ///
    /// The problem is that relaxed orderings don't synchronize initialization of the object with
    /// the read from the second thread. This is a data race. A possible solution would be to use
    /// `Release` and `Acquire` orderings.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert_eq!(p.as_ref(), Some(&1234));
    /// }
    /// ```
    pub unsafe fn as_ref(&self) -> Option<&'g T> {
        let (raw, _) = decompose_tag::<T>(self.data);
        if raw == 0 {
            None
        } else {
            Some(T::deref(raw))
        }
    }

    /// Takes ownership of the pointee.
    ///
    /// # Panics
    ///
    /// Panics if this pointer is null, but only in debug mode.
    ///
    /// # Safety
    ///
    /// This method may be called only if the pointer is valid and nobody else is holding a
    /// reference to the same object.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// unsafe {
    ///     let guard = &epoch::unprotected();
    ///     let p = a.load(SeqCst, guard);
    ///     drop(p.into_owned());
    /// }
    /// ```
    pub unsafe fn into_owned(self) -> Owned<T> {
        debug_assert!(!self.is_null(), "converting a null `Shared` into `Owned`");
        Owned::from_usize(self.data)
    }

    /// Returns the tag stored within the pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<u64>::from(Owned::new(0u64).with_tag(2));
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// assert_eq!(p.tag(), 2);
    /// ```
    pub fn tag(&self) -> usize {
        let (_, tag) = decompose_tag::<T>(self.data);
        tag
    }

    /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to fit into the
    /// unused bits of the pointer to `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(0u64);
    /// let guard = &epoch::pin();
    /// let p1 = a.load(SeqCst, guard);
    /// let p2 = p1.with_tag(2);
    ///
    /// assert_eq!(p1.tag(), 0);
    /// assert_eq!(p2.tag(), 2);
    /// assert_eq!(p1.as_raw(), p2.as_raw());
    /// ```
    pub fn with_tag(&self, tag: usize) -> Shared<'g, T> {
        unsafe { Self::from_usize(compose_tag::<T>(self.data, tag)) }
    }
}

impl<T> From<*const T> for Shared<'_, T> {
    /// Returns a new pointer pointing to `raw`.
    ///
    /// # Panics
    ///
    /// Panics if `raw` is not properly aligned.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Shared;
    ///
    /// let p = Shared::from(Box::into_raw(Box::new(1234)) as *const _);
    /// assert!(!p.is_null());
    /// ```
    fn from(raw: *const T) -> Self {
        let raw = raw as usize;
        ensure_aligned::<T>(raw);
        unsafe { Self::from_usize(raw) }
    }
}

impl<'g, T: ?Sized + Pointable> PartialEq<Shared<'g, T>> for Shared<'g, T> {
    fn eq(&self, other: &Self) -> bool {
        self.data == other.data
    }
}

impl<T: ?Sized + Pointable> Eq for Shared<'_, T> {}

impl<'g, T: ?Sized + Pointable> PartialOrd<Shared<'g, T>> for Shared<'g, T> {
    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
        self.data.partial_cmp(&other.data)
    }
}

impl<T: ?Sized + Pointable> Ord for Shared<'_, T> {
    fn cmp(&self, other: &Self) -> cmp::Ordering {
        self.data.cmp(&other.data)
    }
}

impl<T: ?Sized + Pointable> fmt::Debug for Shared<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (raw, tag) = decompose_tag::<T>(self.data);

        f.debug_struct("Shared")
            .field("raw", &raw)
            .field("tag", &tag)
            .finish()
    }
}

impl<T: ?Sized + Pointable> fmt::Pointer for Shared<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Pointer::fmt(&(unsafe { self.deref() as *const _ }), f)
    }
}

impl<T: ?Sized + Pointable> Default for Shared<'_, T> {
    fn default() -> Self {
        Shared::null()
    }
}
1581
Joel Galenson0d440922021-04-01 15:34:31 -07001582#[cfg(all(test, not(crossbeam_loom)))]
Jakub Koturfc1672b2020-12-21 17:28:14 +01001583mod tests {
Joel Galenson6c485dc2021-06-21 12:20:39 -07001584 use super::{Owned, Shared};
1585 use std::mem::MaybeUninit;
Jakub Koturfc1672b2020-12-21 17:28:14 +01001586
1587 #[test]
1588 fn valid_tag_i8() {
1589 Shared::<i8>::null().with_tag(0);
1590 }
1591
1592 #[test]
1593 fn valid_tag_i64() {
1594 Shared::<i64>::null().with_tag(7);
1595 }
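
    // Illustrative tests added here as sketches; they are not part of the
    // original suite. The first assumes `align_of::<u64>() >= 4`, so at least
    // two tag bits are available.
    #[test]
    fn tag_round_trip() {
        let o = Owned::new(0u64).with_tag(3);
        assert_eq!(o.tag(), 3);
        let o = o.with_tag(0);
        assert_eq!(o.tag(), 0);
    }

    // A sketch of a successful `compare_exchange`, using `unprotected` since
    // no other threads are involved; allocations are reclaimed manually.
    #[test]
    fn compare_exchange_swaps_value() {
        use super::Atomic;
        use std::sync::atomic::Ordering::SeqCst;

        let a = Atomic::new(1234);
        unsafe {
            let guard = crate::unprotected();
            let curr = a.load(SeqCst, guard);
            assert!(a
                .compare_exchange(curr, Owned::new(5678), SeqCst, SeqCst, guard)
                .is_ok());
            // `curr` is no longer reachable from `a`; take ownership and drop it.
            drop(curr.into_owned());
            assert_eq!(a.load(SeqCst, guard).deref(), &5678);
            drop(a.into_owned());
        }
    }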

    #[cfg(feature = "nightly")]
    #[test]
    fn const_atomic_null() {
        use super::Atomic;
        static _U: Atomic<u8> = Atomic::<u8>::null();
    }

    #[test]
    fn array_init() {
        let owned = Owned::<[MaybeUninit<usize>]>::init(10);
        let arr: &[MaybeUninit<usize>] = &*owned;
        assert_eq!(arr.len(), 10);
    }
}