// alloc/sync.rs

#![stable(feature = "rust1", since = "1.0.0")]

//! Thread-safe reference-counting pointers.
//!
//! See the [`Arc<T>`][Arc] documentation for more details.
//!
//! **Note**: This module is only available on platforms that support atomic
//! loads and stores of pointers. This may be detected at compile time using
//! `#[cfg(target_has_atomic = "ptr")]`.

use core::any::Any;
#[cfg(not(no_global_oom_handling))]
use core::clone::CloneToUninit;
use core::clone::UseCloned;
use core::cmp::Ordering;
use core::hash::{Hash, Hasher};
use core::intrinsics::abort;
#[cfg(not(no_global_oom_handling))]
use core::iter;
use core::marker::{PhantomData, Unsize};
use core::mem::{self, ManuallyDrop, align_of_val_raw};
use core::num::NonZeroUsize;
use core::ops::{CoerceUnsized, Deref, DerefMut, DerefPure, DispatchFromDyn, LegacyReceiver};
use core::panic::{RefUnwindSafe, UnwindSafe};
use core::pin::{Pin, PinCoerceUnsized};
use core::ptr::{self, NonNull};
#[cfg(not(no_global_oom_handling))]
use core::slice::from_raw_parts_mut;
use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
use core::sync::atomic::{self, Atomic};
use core::{borrow, fmt, hint};

#[cfg(not(no_global_oom_handling))]
use crate::alloc::handle_alloc_error;
use crate::alloc::{AllocError, Allocator, Global, Layout};
use crate::borrow::{Cow, ToOwned};
use crate::boxed::Box;
use crate::rc::is_dangling;
#[cfg(not(no_global_oom_handling))]
use crate::string::String;
#[cfg(not(no_global_oom_handling))]
use crate::vec::Vec;

/// A soft limit on the number of references that may be made to an `Arc`.
///
/// Going above this limit will abort your program, although not
/// necessarily at _exactly_ `MAX_REFCOUNT + 1` references.
/// Trying to go above it might instead cause a `panic` (even without
/// actually exceeding the limit).
///
/// This is a global invariant, and also applies when using a compare-exchange loop.
///
/// See comment in `Arc::clone`.
const MAX_REFCOUNT: usize = (isize::MAX) as usize;

/// The error message used when either counter rises above `MAX_REFCOUNT` and
/// we can still `panic` safely.
const INTERNAL_OVERFLOW_ERROR: &str = "Arc counter overflow";

#[cfg(not(sanitize = "thread"))]
macro_rules! acquire {
    ($x:expr) => {
        atomic::fence(Acquire)
    };
}

// ThreadSanitizer does not support memory fences. To avoid false positive
// reports in the Arc / Weak implementation, use atomic loads for
// synchronization instead.
#[cfg(sanitize = "thread")]
macro_rules! acquire {
    ($x:expr) => {
        $x.load(Acquire)
    };
}

/// A thread-safe reference-counting pointer. 'Arc' stands for 'Atomically
/// Reference Counted'.
///
/// The type `Arc<T>` provides shared ownership of a value of type `T`,
/// allocated in the heap. Invoking [`clone`][clone] on `Arc` produces
/// a new `Arc` instance, which points to the same allocation on the heap as the
/// source `Arc`, while increasing a reference count. When the last `Arc`
/// pointer to a given allocation is destroyed, the value stored in that allocation (often
/// referred to as "inner value") is also dropped.
///
/// Shared references in Rust disallow mutation by default, and `Arc` is no
/// exception: you cannot generally obtain a mutable reference to something
/// inside an `Arc`. If you do need to mutate through an `Arc`, you have several options:
///
/// 1. Use interior mutability with synchronization primitives like [`Mutex`][mutex],
///    [`RwLock`][rwlock], or one of the [`Atomic`][atomic] types.
///
/// 2. Use clone-on-write semantics with [`Arc::make_mut`], which provides efficient mutation
///    without requiring interior mutability. This approach clones the data only when
///    needed (when there are multiple references) and can be more efficient when mutations
///    are infrequent.
///
/// 3. Use [`Arc::get_mut`] when you know your `Arc` is not shared (has a reference count of 1),
///    which provides direct mutable access to the inner value without any cloning.
///
/// ```
/// use std::sync::Arc;
///
/// let mut data = Arc::new(vec![1, 2, 3]);
///
/// // This will clone the vector only if there are other references to it
/// Arc::make_mut(&mut data).push(4);
///
/// assert_eq!(*data, vec![1, 2, 3, 4]);
/// ```
///
/// **Note**: This type is only available on platforms that support atomic
/// loads and stores of pointers, which includes all platforms that support
/// the `std` crate but not all those which only support [`alloc`](crate).
/// This may be detected at compile time using `#[cfg(target_has_atomic = "ptr")]`.
///
/// ## Thread Safety
///
/// Unlike [`Rc<T>`], `Arc<T>` uses atomic operations for its reference
/// counting. This means that it is thread-safe. The disadvantage is that
/// atomic operations are more expensive than ordinary memory accesses. If you
/// are not sharing reference-counted allocations between threads, consider using
/// [`Rc<T>`] for lower overhead. [`Rc<T>`] is a safe default, because the
/// compiler will catch any attempt to send an [`Rc<T>`] between threads.
/// However, a library might choose `Arc<T>` in order to give library consumers
/// more flexibility.
///
/// `Arc<T>` will implement [`Send`] and [`Sync`] as long as the `T` implements
/// [`Send`] and [`Sync`]. Why can't you put a non-thread-safe type `T` in an
/// `Arc<T>` to make it thread-safe? This may be a bit counter-intuitive at
/// first: after all, isn't the point of `Arc<T>` thread safety? The key is
/// this: `Arc<T>` makes it thread safe to have multiple ownership of the same
/// data, but it doesn't add thread safety to its data. Consider
/// <code>Arc<[RefCell\<T>]></code>. [`RefCell<T>`] isn't [`Sync`], and if `Arc<T>` was always
/// [`Send`], <code>Arc<[RefCell\<T>]></code> would be as well. But then we'd have a problem:
/// [`RefCell<T>`] is not thread safe; it keeps track of the borrowing count using
/// non-atomic operations.
///
/// In the end, this means that you may need to pair `Arc<T>` with some sort of
/// [`std::sync`] type, usually [`Mutex<T>`][mutex].
///
/// ## Breaking cycles with `Weak`
///
/// The [`downgrade`][downgrade] method can be used to create a non-owning
/// [`Weak`] pointer. A [`Weak`] pointer can be [`upgrade`][upgrade]d
/// to an `Arc`, but this will return [`None`] if the value stored in the allocation has
/// already been dropped. In other words, `Weak` pointers do not keep the value
/// inside the allocation alive; however, they *do* keep the allocation
/// (the backing store for the value) alive.
///
/// A cycle between `Arc` pointers will never be deallocated. For this reason,
/// [`Weak`] is used to break cycles. For example, a tree could have
/// strong `Arc` pointers from parent nodes to children, and [`Weak`]
/// pointers from children back to their parents, as the sketch below shows.
///
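/// A minimal sketch of that parent/child pattern (the `Node` type here is
/// illustrative only, not part of the library):
///
/// ```
/// use std::sync::{Arc, Mutex, Weak};
///
/// struct Node {
///     parent: Mutex<Weak<Node>>,
/// }
///
/// let parent = Arc::new(Node { parent: Mutex::new(Weak::new()) });
/// let child = Arc::new(Node { parent: Mutex::new(Arc::downgrade(&parent)) });
///
/// // The child's back-reference does not keep the parent alive on its own...
/// assert!(child.parent.lock().unwrap().upgrade().is_some());
/// drop(parent);
/// // ...so dropping the last `Arc` to the parent frees its value.
/// assert!(child.parent.lock().unwrap().upgrade().is_none());
/// ```
///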
/// # Cloning references
///
/// Creating a new reference from an existing reference-counted pointer is done using the
/// `Clone` trait implemented for [`Arc<T>`][Arc] and [`Weak<T>`][Weak].
///
/// ```
/// use std::sync::Arc;
/// let foo = Arc::new(vec![1.0, 2.0, 3.0]);
/// // The two syntaxes below are equivalent.
/// let a = foo.clone();
/// let b = Arc::clone(&foo);
/// // a, b, and foo are all Arcs that point to the same memory location
/// ```
///
/// ## `Deref` behavior
///
/// `Arc<T>` automatically dereferences to `T` (via the [`Deref`] trait),
/// so you can call `T`'s methods on a value of type `Arc<T>`. To avoid name
/// clashes with `T`'s methods, the methods of `Arc<T>` itself are associated
/// functions, called using [fully qualified syntax]:
///
/// ```
/// use std::sync::Arc;
///
/// let my_arc = Arc::new(());
/// let my_weak = Arc::downgrade(&my_arc);
/// ```
///
/// `Arc<T>`'s implementations of traits like `Clone` may also be called using
/// fully qualified syntax. Some people prefer to use fully qualified syntax,
/// while others prefer using method-call syntax.
///
/// ```
/// use std::sync::Arc;
///
/// let arc = Arc::new(());
/// // Method-call syntax
/// let arc2 = arc.clone();
/// // Fully qualified syntax
/// let arc3 = Arc::clone(&arc);
/// ```
///
/// [`Weak<T>`][Weak] does not auto-dereference to `T`, because the inner value may have
/// already been dropped.
///
/// [`Rc<T>`]: crate::rc::Rc
/// [clone]: Clone::clone
/// [mutex]: ../../std/sync/struct.Mutex.html
/// [rwlock]: ../../std/sync/struct.RwLock.html
/// [atomic]: core::sync::atomic
/// [downgrade]: Arc::downgrade
/// [upgrade]: Weak::upgrade
/// [RefCell\<T>]: core::cell::RefCell
/// [`RefCell<T>`]: core::cell::RefCell
/// [`std::sync`]: ../../std/sync/index.html
/// [`Arc::clone(&from)`]: Arc::clone
/// [fully qualified syntax]: https://doc.rust-lang.org/book/ch19-03-advanced-traits.html#fully-qualified-syntax-for-disambiguation-calling-methods-with-the-same-name
///
/// # Examples
///
/// Sharing some immutable data between threads:
///
/// ```
/// use std::sync::Arc;
/// use std::thread;
///
/// let five = Arc::new(5);
///
/// for _ in 0..10 {
///     let five = Arc::clone(&five);
///
///     thread::spawn(move || {
///         println!("{five:?}");
///     });
/// }
/// ```
///
/// Sharing a mutable [`AtomicUsize`]:
///
/// [`AtomicUsize`]: core::sync::atomic::AtomicUsize "sync::atomic::AtomicUsize"
///
/// ```
/// use std::sync::Arc;
/// use std::sync::atomic::{AtomicUsize, Ordering};
/// use std::thread;
///
/// let val = Arc::new(AtomicUsize::new(5));
///
/// for _ in 0..10 {
///     let val = Arc::clone(&val);
///
///     thread::spawn(move || {
///         let v = val.fetch_add(1, Ordering::Relaxed);
///         println!("{v:?}");
///     });
/// }
/// ```
///
/// See the [`rc` documentation][rc_examples] for more examples of reference
/// counting in general.
///
/// [rc_examples]: crate::rc#examples
#[doc(search_unbox)]
#[rustc_diagnostic_item = "Arc"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_insignificant_dtor]
pub struct Arc<
    T: ?Sized,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    ptr: NonNull<ArcInner<T>>,
    phantom: PhantomData<ArcInner<T>>,
    alloc: A,
}

#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Send> Send for Arc<T, A> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Sync> Sync for Arc<T, A> {}

#[stable(feature = "catch_unwind", since = "1.9.0")]
impl<T: RefUnwindSafe + ?Sized, A: Allocator + UnwindSafe> UnwindSafe for Arc<T, A> {}

#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Arc<U, A>> for Arc<T, A> {}

#[unstable(feature = "dispatch_from_dyn", issue = "none")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Arc<U>> for Arc<T> {}

impl<T: ?Sized> Arc<T> {
    unsafe fn from_inner(ptr: NonNull<ArcInner<T>>) -> Self {
        unsafe { Self::from_inner_in(ptr, Global) }
    }

    unsafe fn from_ptr(ptr: *mut ArcInner<T>) -> Self {
        unsafe { Self::from_ptr_in(ptr, Global) }
    }
}

impl<T: ?Sized, A: Allocator> Arc<T, A> {
    #[inline]
    fn into_inner_with_allocator(this: Self) -> (NonNull<ArcInner<T>>, A) {
        let this = mem::ManuallyDrop::new(this);
        (this.ptr, unsafe { ptr::read(&this.alloc) })
    }

    #[inline]
    unsafe fn from_inner_in(ptr: NonNull<ArcInner<T>>, alloc: A) -> Self {
        Self { ptr, phantom: PhantomData, alloc }
    }

    #[inline]
    unsafe fn from_ptr_in(ptr: *mut ArcInner<T>, alloc: A) -> Self {
        unsafe { Self::from_inner_in(NonNull::new_unchecked(ptr), alloc) }
    }
}

/// `Weak` is a version of [`Arc`] that holds a non-owning reference to the
/// managed allocation.
///
/// The allocation is accessed by calling [`upgrade`] on the `Weak`
/// pointer, which returns an <code>[Option]<[Arc]\<T>></code>.
///
/// Since a `Weak` reference does not count towards ownership, it will not
/// prevent the value stored in the allocation from being dropped, and `Weak` itself makes no
/// guarantees about the value still being present. Thus it may return [`None`]
/// when [`upgrade`]d. Note however that a `Weak` reference *does* prevent the allocation
/// itself (the backing store) from being deallocated.
///
/// A `Weak` pointer is useful for keeping a temporary reference to the allocation
/// managed by [`Arc`] without preventing its inner value from being dropped. It is also used to
/// prevent circular references between [`Arc`] pointers, since mutual owning references
/// would never allow either [`Arc`] to be dropped. For example, a tree could
/// have strong [`Arc`] pointers from parent nodes to children, and `Weak`
/// pointers from children back to their parents.
///
/// The typical way to obtain a `Weak` pointer is to call [`Arc::downgrade`].
///
333/// [`upgrade`]: Weak::upgrade
334#[stable(feature = "arc_weak", since = "1.4.0")]
335#[rustc_diagnostic_item = "ArcWeak"]
336pub struct Weak<
337    T: ?Sized,
338    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
339> {
340    // This is a `NonNull` to allow optimizing the size of this type in enums,
341    // but it is not necessarily a valid pointer.
342    // `Weak::new` sets this to `usize::MAX` so that it doesn’t need
343    // to allocate space on the heap. That's not a value a real pointer
344    // will ever have because RcInner has alignment at least 2.
345    ptr: NonNull<ArcInner<T>>,
346    alloc: A,
347}
348
349#[stable(feature = "arc_weak", since = "1.4.0")]
350unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Send> Send for Weak<T, A> {}
351#[stable(feature = "arc_weak", since = "1.4.0")]
352unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Sync> Sync for Weak<T, A> {}
353
354#[unstable(feature = "coerce_unsized", issue = "18598")]
355impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Weak<U, A>> for Weak<T, A> {}
356#[unstable(feature = "dispatch_from_dyn", issue = "none")]
357impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Weak<U>> for Weak<T> {}
358
359#[stable(feature = "arc_weak", since = "1.4.0")]
360impl<T: ?Sized, A: Allocator> fmt::Debug for Weak<T, A> {
361    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
362        write!(f, "(Weak)")
363    }
364}
365
366// This is repr(C) to future-proof against possible field-reordering, which
367// would interfere with otherwise safe [into|from]_raw() of transmutable
368// inner types.
369#[repr(C)]
370struct ArcInner<T: ?Sized> {
371    strong: Atomic<usize>,
372
373    // the value usize::MAX acts as a sentinel for temporarily "locking" the
374    // ability to upgrade weak pointers or downgrade strong ones; this is used
375    // to avoid races in `make_mut` and `get_mut`.
376    weak: Atomic<usize>,
377
378    data: T,
379}
380
381/// Calculate layout for `ArcInner<T>` using the inner value's layout
382fn arcinner_layout_for_value_layout(layout: Layout) -> Layout {
383    // Calculate layout using the given value layout.
384    // Previously, layout was calculated on the expression
385    // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
386    // reference (see #54908).
387    Layout::new::<ArcInner<()>>().extend(layout).unwrap().0.pad_to_align()
388}
389
390unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
391unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}
392
393impl<T> Arc<T> {
394    /// Constructs a new `Arc<T>`.
395    ///
396    /// # Examples
397    ///
398    /// ```
399    /// use std::sync::Arc;
400    ///
401    /// let five = Arc::new(5);
402    /// ```
403    #[cfg(not(no_global_oom_handling))]
404    #[inline]
405    #[stable(feature = "rust1", since = "1.0.0")]
406    pub fn new(data: T) -> Arc<T> {
407        // Start the weak pointer count as 1 which is the weak pointer that's
408        // held by all the strong pointers (kinda), see std/rc.rs for more info
409        let x: Box<_> = Box::new(ArcInner {
410            strong: atomic::AtomicUsize::new(1),
411            weak: atomic::AtomicUsize::new(1),
412            data,
413        });
414        unsafe { Self::from_inner(Box::leak(x).into()) }
415    }
416
417    /// Constructs a new `Arc<T>` while giving you a `Weak<T>` to the allocation,
418    /// to allow you to construct a `T` which holds a weak pointer to itself.
419    ///
420    /// Generally, a structure circularly referencing itself, either directly or
421    /// indirectly, should not hold a strong reference to itself to prevent a memory leak.
422    /// Using this function, you get access to the weak pointer during the
423    /// initialization of `T`, before the `Arc<T>` is created, such that you can
424    /// clone and store it inside the `T`.
425    ///
426    /// `new_cyclic` first allocates the managed allocation for the `Arc<T>`,
427    /// then calls your closure, giving it a `Weak<T>` to this allocation,
428    /// and only afterwards completes the construction of the `Arc<T>` by placing
429    /// the `T` returned from your closure into the allocation.
430    ///
431    /// Since the new `Arc<T>` is not fully-constructed until `Arc<T>::new_cyclic`
432    /// returns, calling [`upgrade`] on the weak reference inside your closure will
433    /// fail and result in a `None` value.
434    ///
435    /// # Panics
436    ///
437    /// If `data_fn` panics, the panic is propagated to the caller, and the
438    /// temporary [`Weak<T>`] is dropped normally.
439    ///
440    /// # Example
441    ///
442    /// ```
443    /// # #![allow(dead_code)]
444    /// use std::sync::{Arc, Weak};
445    ///
446    /// struct Gadget {
447    ///     me: Weak<Gadget>,
448    /// }
449    ///
450    /// impl Gadget {
451    ///     /// Constructs a reference counted Gadget.
452    ///     fn new() -> Arc<Self> {
453    ///         // `me` is a `Weak<Gadget>` pointing at the new allocation of the
454    ///         // `Arc` we're constructing.
455    ///         Arc::new_cyclic(|me| {
456    ///             // Create the actual struct here.
457    ///             Gadget { me: me.clone() }
458    ///         })
459    ///     }
460    ///
461    ///     /// Returns a reference counted pointer to Self.
462    ///     fn me(&self) -> Arc<Self> {
463    ///         self.me.upgrade().unwrap()
464    ///     }
465    /// }
466    /// ```
467    /// [`upgrade`]: Weak::upgrade
468    #[cfg(not(no_global_oom_handling))]
469    #[inline]
470    #[stable(feature = "arc_new_cyclic", since = "1.60.0")]
471    pub fn new_cyclic<F>(data_fn: F) -> Arc<T>
472    where
473        F: FnOnce(&Weak<T>) -> T,
474    {
475        Self::new_cyclic_in(data_fn, Global)
476    }
477
478    /// Constructs a new `Arc` with uninitialized contents.
479    ///
480    /// # Examples
481    ///
482    /// ```
483    /// #![feature(get_mut_unchecked)]
484    ///
485    /// use std::sync::Arc;
486    ///
487    /// let mut five = Arc::<u32>::new_uninit();
488    ///
489    /// // Deferred initialization:
490    /// Arc::get_mut(&mut five).unwrap().write(5);
491    ///
492    /// let five = unsafe { five.assume_init() };
493    ///
494    /// assert_eq!(*five, 5)
495    /// ```
496    #[cfg(not(no_global_oom_handling))]
497    #[inline]
498    #[stable(feature = "new_uninit", since = "1.82.0")]
499    #[must_use]
500    pub fn new_uninit() -> Arc<mem::MaybeUninit<T>> {
501        unsafe {
502            Arc::from_ptr(Arc::allocate_for_layout(
503                Layout::new::<T>(),
504                |layout| Global.allocate(layout),
505                <*mut u8>::cast,
506            ))
507        }
508    }
509
510    /// Constructs a new `Arc` with uninitialized contents, with the memory
511    /// being filled with `0` bytes.
512    ///
513    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
514    /// of this method.
515    ///
516    /// # Examples
517    ///
518    /// ```
519    /// use std::sync::Arc;
520    ///
521    /// let zero = Arc::<u32>::new_zeroed();
522    /// let zero = unsafe { zero.assume_init() };
523    ///
524    /// assert_eq!(*zero, 0)
525    /// ```
526    ///
527    /// [zeroed]: mem::MaybeUninit::zeroed
528    #[cfg(not(no_global_oom_handling))]
529    #[inline]
530    #[stable(feature = "new_zeroed_alloc", since = "CURRENT_RUSTC_VERSION")]
531    #[must_use]
532    pub fn new_zeroed() -> Arc<mem::MaybeUninit<T>> {
533        unsafe {
534            Arc::from_ptr(Arc::allocate_for_layout(
535                Layout::new::<T>(),
536                |layout| Global.allocate_zeroed(layout),
537                <*mut u8>::cast,
538            ))
539        }
540    }
541
542    /// Constructs a new `Pin<Arc<T>>`. If `T` does not implement `Unpin`, then
543    /// `data` will be pinned in memory and unable to be moved.
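    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use std::sync::Arc;
    /// use std::pin::Pin;
    ///
    /// let pinned: Pin<Arc<u32>> = Arc::pin(5);
    /// assert_eq!(*pinned, 5);
    /// ```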
    #[cfg(not(no_global_oom_handling))]
    #[stable(feature = "pin", since = "1.33.0")]
    #[must_use]
    pub fn pin(data: T) -> Pin<Arc<T>> {
        unsafe { Pin::new_unchecked(Arc::new(data)) }
    }

    /// Constructs a new `Pin<Arc<T>>`, returning an error if allocation fails.
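    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// #![feature(allocator_api)]
    /// use std::sync::Arc;
    ///
    /// let pinned = Arc::try_pin(5)?;
    /// assert_eq!(*pinned, 5);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```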
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn try_pin(data: T) -> Result<Pin<Arc<T>>, AllocError> {
        unsafe { Ok(Pin::new_unchecked(Arc::try_new(data)?)) }
    }

    /// Constructs a new `Arc<T>`, returning an error if allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    /// use std::sync::Arc;
    ///
    /// let five = Arc::try_new(5)?;
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn try_new(data: T) -> Result<Arc<T>, AllocError> {
        // Start the weak pointer count as 1, which is the weak pointer that's
        // held by all the strong pointers (kinda); see std/rc.rs for more info.
        let x: Box<_> = Box::try_new(ArcInner {
            strong: atomic::AtomicUsize::new(1),
            weak: atomic::AtomicUsize::new(1),
            data,
        })?;
        unsafe { Ok(Self::from_inner(Box::leak(x).into())) }
    }

    /// Constructs a new `Arc` with uninitialized contents, returning an error
    /// if allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut five = Arc::<u32>::try_new_uninit()?;
    ///
    /// // Deferred initialization:
    /// Arc::get_mut(&mut five).unwrap().write(5);
    ///
    /// let five = unsafe { five.assume_init() };
    ///
    /// assert_eq!(*five, 5);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn try_new_uninit() -> Result<Arc<mem::MaybeUninit<T>>, AllocError> {
        unsafe {
            Ok(Arc::from_ptr(Arc::try_allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.allocate(layout),
                <*mut u8>::cast,
            )?))
        }
    }

    /// Constructs a new `Arc` with uninitialized contents, with the memory
    /// being filled with `0` bytes, returning an error if allocation fails.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
    /// of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    ///
    /// let zero = Arc::<u32>::try_new_zeroed()?;
    /// let zero = unsafe { zero.assume_init() };
    ///
    /// assert_eq!(*zero, 0);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn try_new_zeroed() -> Result<Arc<mem::MaybeUninit<T>>, AllocError> {
        unsafe {
            Ok(Arc::from_ptr(Arc::try_allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.allocate_zeroed(layout),
                <*mut u8>::cast,
            )?))
        }
    }
}

impl<T, A: Allocator> Arc<T, A> {
    /// Constructs a new `Arc<T>` in the provided allocator.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let five = Arc::new_in(5, System);
    /// ```
    #[inline]
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn new_in(data: T, alloc: A) -> Arc<T, A> {
        // Start the weak pointer count as 1, which is the weak pointer that's
        // held by all the strong pointers (kinda); see std/rc.rs for more info.
        let x = Box::new_in(
            ArcInner {
                strong: atomic::AtomicUsize::new(1),
                weak: atomic::AtomicUsize::new(1),
                data,
            },
            alloc,
        );
        let (ptr, alloc) = Box::into_unique(x);
        unsafe { Self::from_inner_in(ptr.into(), alloc) }
    }

    /// Constructs a new `Arc` with uninitialized contents in the provided allocator.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(get_mut_unchecked)]
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let mut five = Arc::<u32, _>::new_uninit_in(System);
    ///
    /// let five = unsafe {
    ///     // Deferred initialization:
    ///     Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
    ///
    ///     five.assume_init()
    /// };
    ///
    /// assert_eq!(*five, 5)
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn new_uninit_in(alloc: A) -> Arc<mem::MaybeUninit<T>, A> {
        unsafe {
            Arc::from_ptr_in(
                Arc::allocate_for_layout(
                    Layout::new::<T>(),
                    |layout| alloc.allocate(layout),
                    <*mut u8>::cast,
                ),
                alloc,
            )
        }
    }

    /// Constructs a new `Arc` with uninitialized contents, with the memory
    /// being filled with `0` bytes, in the provided allocator.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
    /// of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let zero = Arc::<u32, _>::new_zeroed_in(System);
    /// let zero = unsafe { zero.assume_init() };
    ///
    /// assert_eq!(*zero, 0)
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn new_zeroed_in(alloc: A) -> Arc<mem::MaybeUninit<T>, A> {
        unsafe {
            Arc::from_ptr_in(
                Arc::allocate_for_layout(
                    Layout::new::<T>(),
                    |layout| alloc.allocate_zeroed(layout),
                    <*mut u8>::cast,
                ),
                alloc,
            )
        }
    }

    /// Constructs a new `Arc<T, A>` in the given allocator while giving you a `Weak<T, A>` to the allocation,
    /// to allow you to construct a `T` which holds a weak pointer to itself.
    ///
    /// Generally, a structure circularly referencing itself, either directly or
    /// indirectly, should not hold a strong reference to itself to prevent a memory leak.
    /// Using this function, you get access to the weak pointer during the
    /// initialization of `T`, before the `Arc<T, A>` is created, such that you can
    /// clone and store it inside the `T`.
    ///
    /// `new_cyclic_in` first allocates the managed allocation for the `Arc<T, A>`,
    /// then calls your closure, giving it a `Weak<T, A>` to this allocation,
    /// and only afterwards completes the construction of the `Arc<T, A>` by placing
    /// the `T` returned from your closure into the allocation.
    ///
    /// Since the new `Arc<T, A>` is not fully-constructed until `Arc<T, A>::new_cyclic_in`
    /// returns, calling [`upgrade`] on the weak reference inside your closure will
    /// fail and result in a `None` value.
    ///
    /// # Panics
    ///
    /// If `data_fn` panics, the panic is propagated to the caller, and the
    /// temporary [`Weak<T>`] is dropped normally.
    ///
    /// # Example
    ///
    /// See [`new_cyclic`].
    ///
    /// [`new_cyclic`]: Arc::new_cyclic
    /// [`upgrade`]: Weak::upgrade
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn new_cyclic_in<F>(data_fn: F, alloc: A) -> Arc<T, A>
    where
        F: FnOnce(&Weak<T, A>) -> T,
    {
        // Construct the inner in the "uninitialized" state with a single
        // weak reference.
        let (uninit_raw_ptr, alloc) = Box::into_raw_with_allocator(Box::new_in(
            ArcInner {
                strong: atomic::AtomicUsize::new(0),
                weak: atomic::AtomicUsize::new(1),
                data: mem::MaybeUninit::<T>::uninit(),
            },
            alloc,
        ));
        let uninit_ptr: NonNull<_> = (unsafe { &mut *uninit_raw_ptr }).into();
        let init_ptr: NonNull<ArcInner<T>> = uninit_ptr.cast();

        let weak = Weak { ptr: init_ptr, alloc };

        // It's important we don't give up ownership of the weak pointer, or
        // else the memory might be freed by the time `data_fn` returns. If
        // we really wanted to pass ownership, we could create an additional
        // weak pointer for ourselves, but this would result in additional
        // updates to the weak reference count which might not be necessary
        // otherwise.
        let data = data_fn(&weak);

        // Now we can properly initialize the inner value and turn our weak
        // reference into a strong reference.
        let strong = unsafe {
            let inner = init_ptr.as_ptr();
            ptr::write(&raw mut (*inner).data, data);

            // The above write to the data field must be visible to any threads which
            // observe a non-zero strong count. Therefore we need at least "Release" ordering
            // in order to synchronize with the `compare_exchange_weak` in `Weak::upgrade`.
            //
            // "Acquire" ordering is not required. When considering the possible behaviors
            // of `data_fn` we only need to look at what it could do with a reference to a
            // non-upgradeable `Weak`:
            // - It can *clone* the `Weak`, increasing the weak reference count.
            // - It can drop those clones, decreasing the weak reference count (but never to zero).
            //
            // These side effects do not impact us in any way, and no other side effects are
            // possible with safe code alone.
            let prev_value = (*inner).strong.fetch_add(1, Release);
            debug_assert_eq!(prev_value, 0, "No prior strong references should exist");

            // Strong references should collectively own a shared weak reference,
            // so don't run the destructor for our old weak reference.
            // Calling into_raw_with_allocator has the double effect of giving us back the allocator,
            // and forgetting the weak reference.
            let alloc = weak.into_raw_with_allocator().1;

            Arc::from_inner_in(init_ptr, alloc)
        };

        strong
    }

    /// Constructs a new `Pin<Arc<T, A>>` in the provided allocator. If `T` does not implement `Unpin`,
    /// then `data` will be pinned in memory and unable to be moved.
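    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let pinned = Arc::pin_in(5, System);
    /// assert_eq!(*pinned, 5);
    /// ```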
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn pin_in(data: T, alloc: A) -> Pin<Arc<T, A>>
    where
        A: 'static,
    {
        unsafe { Pin::new_unchecked(Arc::new_in(data, alloc)) }
    }

    /// Constructs a new `Pin<Arc<T, A>>` in the provided allocator, returning an error if
    /// allocation fails.
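    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let pinned = Arc::try_pin_in(5, System)?;
    /// assert_eq!(*pinned, 5);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```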
    #[inline]
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn try_pin_in(data: T, alloc: A) -> Result<Pin<Arc<T, A>>, AllocError>
    where
        A: 'static,
    {
        unsafe { Ok(Pin::new_unchecked(Arc::try_new_in(data, alloc)?)) }
    }

    /// Constructs a new `Arc<T, A>` in the provided allocator, returning an error if allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let five = Arc::try_new_in(5, System)?;
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    #[inline]
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn try_new_in(data: T, alloc: A) -> Result<Arc<T, A>, AllocError> {
        // Start the weak pointer count as 1, which is the weak pointer that's
        // held by all the strong pointers (kinda); see std/rc.rs for more info.
        let x = Box::try_new_in(
            ArcInner {
                strong: atomic::AtomicUsize::new(1),
                weak: atomic::AtomicUsize::new(1),
                data,
            },
            alloc,
        )?;
        let (ptr, alloc) = Box::into_unique(x);
        Ok(unsafe { Self::from_inner_in(ptr.into(), alloc) })
    }

    /// Constructs a new `Arc` with uninitialized contents, in the provided allocator, returning an
    /// error if allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let mut five = Arc::<u32, _>::try_new_uninit_in(System)?;
    ///
    /// let five = unsafe {
    ///     // Deferred initialization:
    ///     Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
    ///
    ///     five.assume_init()
    /// };
    ///
    /// assert_eq!(*five, 5);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn try_new_uninit_in(alloc: A) -> Result<Arc<mem::MaybeUninit<T>, A>, AllocError> {
        unsafe {
            Ok(Arc::from_ptr_in(
                Arc::try_allocate_for_layout(
                    Layout::new::<T>(),
                    |layout| alloc.allocate(layout),
                    <*mut u8>::cast,
                )?,
                alloc,
            ))
        }
    }

    /// Constructs a new `Arc` with uninitialized contents, with the memory
    /// being filled with `0` bytes, in the provided allocator, returning an error if allocation
    /// fails.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
    /// of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let zero = Arc::<u32, _>::try_new_zeroed_in(System)?;
    /// let zero = unsafe { zero.assume_init() };
    ///
    /// assert_eq!(*zero, 0);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn try_new_zeroed_in(alloc: A) -> Result<Arc<mem::MaybeUninit<T>, A>, AllocError> {
        unsafe {
            Ok(Arc::from_ptr_in(
                Arc::try_allocate_for_layout(
                    Layout::new::<T>(),
                    |layout| alloc.allocate_zeroed(layout),
                    <*mut u8>::cast,
                )?,
                alloc,
            ))
        }
    }

    /// Returns the inner value, if the `Arc` has exactly one strong reference.
    ///
    /// Otherwise, an [`Err`] is returned with the same `Arc` that was
    /// passed in.
    ///
    /// This will succeed even if there are outstanding weak references.
    ///
    /// It is strongly recommended to use [`Arc::into_inner`] instead if you don't
    /// keep the `Arc` in the [`Err`] case.
    /// Immediately dropping the [`Err`]-value, as the expression
    /// `Arc::try_unwrap(this).ok()` does, can cause the strong count to
    /// drop to zero and the inner value of the `Arc` to be dropped.
    /// For instance, if two threads execute such an expression in parallel,
    /// there is a race condition without the possibility of unsafety:
    /// The threads could first both check whether they own the last instance
    /// in `Arc::try_unwrap`, determine that they both do not, and then both
    /// discard and drop their instance in the call to [`ok`][`Result::ok`].
    /// In this scenario, the value inside the `Arc` is safely destroyed
    /// by exactly one of the threads, but neither thread will ever be able
    /// to use the value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(3);
    /// assert_eq!(Arc::try_unwrap(x), Ok(3));
    ///
    /// let x = Arc::new(4);
    /// let _y = Arc::clone(&x);
    /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4);
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn try_unwrap(this: Self) -> Result<T, Self> {
        if this.inner().strong.compare_exchange(1, 0, Relaxed, Relaxed).is_err() {
            return Err(this);
        }

        acquire!(this.inner().strong);

        let this = ManuallyDrop::new(this);
        let elem: T = unsafe { ptr::read(&this.ptr.as_ref().data) };
        let alloc: A = unsafe { ptr::read(&this.alloc) }; // copy the allocator

        // Make a weak pointer to clean up the implicit strong-weak reference
        let _weak = Weak { ptr: this.ptr, alloc };

        Ok(elem)
    }

    /// Returns the inner value, if the `Arc` has exactly one strong reference.
    ///
    /// Otherwise, [`None`] is returned and the `Arc` is dropped.
    ///
    /// This will succeed even if there are outstanding weak references.
    ///
    /// If `Arc::into_inner` is called on every clone of this `Arc`,
    /// it is guaranteed that exactly one of the calls returns the inner value.
    /// This means in particular that the inner value is not dropped.
    ///
    /// [`Arc::try_unwrap`] is conceptually similar to `Arc::into_inner`, but it
    /// is meant for different use-cases. If used as a direct replacement
    /// for `Arc::into_inner` anyway, such as with the expression
    /// <code>[Arc::try_unwrap]\(this).[ok][Result::ok]()</code>, then it does
    /// **not** give the same guarantee as described in the previous paragraph.
    /// For more information, see the examples below and read the documentation
    /// of [`Arc::try_unwrap`].
    ///
    /// # Examples
    ///
    /// Minimal example demonstrating the guarantee that `Arc::into_inner` gives.
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(3);
    /// let y = Arc::clone(&x);
    ///
    /// // Two threads calling `Arc::into_inner` on both clones of an `Arc`:
    /// let x_thread = std::thread::spawn(|| Arc::into_inner(x));
    /// let y_thread = std::thread::spawn(|| Arc::into_inner(y));
    ///
    /// let x_inner_value = x_thread.join().unwrap();
    /// let y_inner_value = y_thread.join().unwrap();
    ///
    /// // One of the threads is guaranteed to receive the inner value:
    /// assert!(matches!(
    ///     (x_inner_value, y_inner_value),
    ///     (None, Some(3)) | (Some(3), None)
    /// ));
    /// // The result could also be `(None, None)` if the threads called
    /// // `Arc::try_unwrap(x).ok()` and `Arc::try_unwrap(y).ok()` instead.
    /// ```
    ///
    /// A more practical example demonstrating the need for `Arc::into_inner`:
    /// ```
    /// use std::sync::Arc;
    ///
    /// // Definition of a simple singly linked list using `Arc`:
    /// #[derive(Clone)]
    /// struct LinkedList<T>(Option<Arc<Node<T>>>);
    /// struct Node<T>(T, Option<Arc<Node<T>>>);
    ///
    /// // Dropping a long `LinkedList<T>` relying on the destructor of `Arc`
    /// // can cause a stack overflow. To prevent this, we can provide a
    /// // manual `Drop` implementation that does the destruction in a loop:
    /// impl<T> Drop for LinkedList<T> {
    ///     fn drop(&mut self) {
    ///         let mut link = self.0.take();
    ///         while let Some(arc_node) = link.take() {
    ///             if let Some(Node(_value, next)) = Arc::into_inner(arc_node) {
    ///                 link = next;
    ///             }
    ///         }
    ///     }
    /// }
    ///
    /// // Implementation of `new` and `push` omitted
    /// impl<T> LinkedList<T> {
    ///     /* ... */
    /// #   fn new() -> Self {
    /// #       LinkedList(None)
    /// #   }
    /// #   fn push(&mut self, x: T) {
    /// #       self.0 = Some(Arc::new(Node(x, self.0.take())));
    /// #   }
    /// }
    ///
    /// // The following code could have still caused a stack overflow
    /// // despite the manual `Drop` impl if that `Drop` impl had used
    /// // `Arc::try_unwrap(arc).ok()` instead of `Arc::into_inner(arc)`.
    ///
    /// // Create a long list and clone it
    /// let mut x = LinkedList::new();
    /// let size = 100000;
    /// # let size = if cfg!(miri) { 100 } else { size };
    /// for i in 0..size {
    ///     x.push(i); // Adds i to the front of x
    /// }
    /// let y = x.clone();
    ///
    /// // Drop the clones in parallel
    /// let x_thread = std::thread::spawn(|| drop(x));
    /// let y_thread = std::thread::spawn(|| drop(y));
    /// x_thread.join().unwrap();
    /// y_thread.join().unwrap();
    /// ```
    #[inline]
    #[stable(feature = "arc_into_inner", since = "1.70.0")]
    pub fn into_inner(this: Self) -> Option<T> {
        // Make sure that the ordinary `Drop` implementation isn’t called as well
        let mut this = mem::ManuallyDrop::new(this);

        // Following the implementation of `drop` and `drop_slow`
        if this.inner().strong.fetch_sub(1, Release) != 1 {
            return None;
        }

        acquire!(this.inner().strong);

        // SAFETY: This mirrors the line
        //
        //     unsafe { ptr::drop_in_place(Self::get_mut_unchecked(self)) };
        //
        // in `drop_slow`. Instead of dropping the value behind the pointer,
        // it is read and eventually returned; `ptr::read` has the same
        // safety conditions as `ptr::drop_in_place`.

        let inner = unsafe { ptr::read(Self::get_mut_unchecked(&mut this)) };
        let alloc = unsafe { ptr::read(&this.alloc) };

        drop(Weak { ptr: this.ptr, alloc });

        Some(inner)
    }
}

impl<T> Arc<[T]> {
    /// Constructs a new atomically reference-counted slice with uninitialized contents.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
    ///
    /// // Deferred initialization:
    /// let data = Arc::get_mut(&mut values).unwrap();
    /// data[0].write(1);
    /// data[1].write(2);
    /// data[2].write(3);
    ///
    /// let values = unsafe { values.assume_init() };
    ///
    /// assert_eq!(*values, [1, 2, 3])
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "new_uninit", since = "1.82.0")]
    #[must_use]
    pub fn new_uninit_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
        unsafe { Arc::from_ptr(Arc::allocate_for_slice(len)) }
    }

    /// Constructs a new atomically reference-counted slice with uninitialized contents, with the memory being
    /// filled with `0` bytes.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
    /// incorrect usage of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let values = Arc::<[u32]>::new_zeroed_slice(3);
    /// let values = unsafe { values.assume_init() };
    ///
    /// assert_eq!(*values, [0, 0, 0])
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "new_zeroed_alloc", since = "CURRENT_RUSTC_VERSION")]
    #[must_use]
    pub fn new_zeroed_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
        unsafe {
            Arc::from_ptr(Arc::allocate_for_layout(
                Layout::array::<T>(len).unwrap(),
                |layout| Global.allocate_zeroed(layout),
                |mem| {
                    ptr::slice_from_raw_parts_mut(mem as *mut T, len)
                        as *mut ArcInner<[mem::MaybeUninit<T>]>
                },
            ))
        }
    }

    /// Converts the reference-counted slice into a reference-counted array.
    ///
    /// This operation does not reallocate; the underlying array of the slice is simply reinterpreted as an array type.
    ///
    /// If `N` is not exactly equal to the length of `self`, then this method returns `None`.
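    ///
    /// # Examples
    ///
    /// A minimal sketch (using the unstable `slice_as_array` feature):
    ///
    /// ```
    /// #![feature(slice_as_array)]
    ///
    /// use std::sync::Arc;
    ///
    /// let slice: Arc<[u32]> = Arc::from([1, 2, 3]);
    ///
    /// let array: Arc<[u32; 3]> = slice.into_array().unwrap();
    /// assert_eq!(*array, [1, 2, 3]);
    /// ```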
    #[unstable(feature = "slice_as_array", issue = "133508")]
    #[inline]
    #[must_use]
    pub fn into_array<const N: usize>(self) -> Option<Arc<[T; N]>> {
        if self.len() == N {
            let ptr = Self::into_raw(self) as *const [T; N];

            // SAFETY: The underlying array of a slice has the exact same layout as an actual array `[T; N]` if `N` is equal to the slice's length.
            let me = unsafe { Arc::from_raw(ptr) };
            Some(me)
        } else {
            None
        }
    }
}

impl<T, A: Allocator> Arc<[T], A> {
    /// Constructs a new atomically reference-counted slice with uninitialized contents in the
    /// provided allocator.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(get_mut_unchecked)]
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let mut values = Arc::<[u32], _>::new_uninit_slice_in(3, System);
    ///
    /// let values = unsafe {
    ///     // Deferred initialization:
    ///     Arc::get_mut_unchecked(&mut values)[0].as_mut_ptr().write(1);
    ///     Arc::get_mut_unchecked(&mut values)[1].as_mut_ptr().write(2);
    ///     Arc::get_mut_unchecked(&mut values)[2].as_mut_ptr().write(3);
    ///
    ///     values.assume_init()
    /// };
    ///
    /// assert_eq!(*values, [1, 2, 3])
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn new_uninit_slice_in(len: usize, alloc: A) -> Arc<[mem::MaybeUninit<T>], A> {
        unsafe { Arc::from_ptr_in(Arc::allocate_for_slice_in(len, &alloc), alloc) }
    }

    /// Constructs a new atomically reference-counted slice with uninitialized contents, with the memory being
    /// filled with `0` bytes, in the provided allocator.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
    /// incorrect usage of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let values = Arc::<[u32], _>::new_zeroed_slice_in(3, System);
    /// let values = unsafe { values.assume_init() };
    ///
    /// assert_eq!(*values, [0, 0, 0])
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn new_zeroed_slice_in(len: usize, alloc: A) -> Arc<[mem::MaybeUninit<T>], A> {
        unsafe {
            Arc::from_ptr_in(
                Arc::allocate_for_layout(
                    Layout::array::<T>(len).unwrap(),
                    |layout| alloc.allocate_zeroed(layout),
                    |mem| {
                        ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len)
                            as *mut ArcInner<[mem::MaybeUninit<T>]>
                    },
                ),
                alloc,
            )
        }
    }
}

impl<T, A: Allocator> Arc<mem::MaybeUninit<T>, A> {
    /// Converts to `Arc<T>`.
    ///
    /// # Safety
    ///
    /// As with [`MaybeUninit::assume_init`],
    /// it is up to the caller to guarantee that the inner value
    /// really is in an initialized state.
    /// Calling this when the content is not yet fully initialized
    /// causes immediate undefined behavior.
    ///
    /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut five = Arc::<u32>::new_uninit();
    ///
    /// // Deferred initialization:
    /// Arc::get_mut(&mut five).unwrap().write(5);
    ///
    /// let five = unsafe { five.assume_init() };
    ///
    /// assert_eq!(*five, 5)
    /// ```
    #[stable(feature = "new_uninit", since = "1.82.0")]
    #[must_use = "`self` will be dropped if the result is not used"]
    #[inline]
    pub unsafe fn assume_init(self) -> Arc<T, A> {
        let (ptr, alloc) = Arc::into_inner_with_allocator(self);
        unsafe { Arc::from_inner_in(ptr.cast(), alloc) }
    }
}

impl<T, A: Allocator> Arc<[mem::MaybeUninit<T>], A> {
    /// Converts to `Arc<[T]>`.
    ///
    /// # Safety
    ///
    /// As with [`MaybeUninit::assume_init`],
    /// it is up to the caller to guarantee that the inner value
    /// really is in an initialized state.
    /// Calling this when the content is not yet fully initialized
    /// causes immediate undefined behavior.
    ///
    /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
    ///
    /// // Deferred initialization:
    /// let data = Arc::get_mut(&mut values).unwrap();
    /// data[0].write(1);
    /// data[1].write(2);
    /// data[2].write(3);
    ///
    /// let values = unsafe { values.assume_init() };
    ///
    /// assert_eq!(*values, [1, 2, 3])
    /// ```
    #[stable(feature = "new_uninit", since = "1.82.0")]
    #[must_use = "`self` will be dropped if the result is not used"]
    #[inline]
    pub unsafe fn assume_init(self) -> Arc<[T], A> {
        let (ptr, alloc) = Arc::into_inner_with_allocator(self);
        unsafe { Arc::from_ptr_in(ptr.as_ptr() as _, alloc) }
    }
}

impl<T: ?Sized> Arc<T> {
    /// Constructs an `Arc<T>` from a raw pointer.
    ///
    /// The raw pointer must have been previously returned by a call to
    /// [`Arc<U>::into_raw`][into_raw] with the following requirements:
    ///
    /// * If `U` is sized, it must have the same size and alignment as `T`. This
    ///   is trivially true if `U` is `T`.
    /// * If `U` is unsized, its data pointer must have the same size and
    ///   alignment as `T`. This is trivially true if `Arc<U>` was constructed
    ///   through `Arc<T>` and then converted to `Arc<U>` through an [unsized
    ///   coercion].
    ///
    /// Note that if `U` or `U`'s data pointer is not `T` but has the same size
    /// and alignment, this is basically like transmuting references of
    /// different types. See [`mem::transmute`][transmute] for more information
    /// on what restrictions apply in this case.
    ///
    /// The raw pointer must point to a block of memory allocated by the global allocator.
    ///
    /// The user of `from_raw` has to make sure a specific value of `T` is only
    /// dropped once.
    ///
    /// This function is unsafe because improper use may lead to memory unsafety,
    /// even if the returned `Arc<T>` is never accessed.
    ///
    /// [into_raw]: Arc::into_raw
    /// [transmute]: core::mem::transmute
    /// [unsized coercion]: https://doc.rust-lang.org/reference/type-coercions.html#unsized-coercions
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new("hello".to_owned());
    /// let x_ptr = Arc::into_raw(x);
    ///
    /// unsafe {
    ///     // Convert back to an `Arc` to prevent leak.
    ///     let x = Arc::from_raw(x_ptr);
    ///     assert_eq!(&*x, "hello");
    ///
    ///     // Further calls to `Arc::from_raw(x_ptr)` would be memory-unsafe.
    /// }
    ///
    /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
    /// ```
    ///
    /// Convert a slice back into its original array:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x: Arc<[u32]> = Arc::new([1, 2, 3]);
    /// let x_ptr: *const [u32] = Arc::into_raw(x);
    ///
    /// unsafe {
    ///     let x: Arc<[u32; 3]> = Arc::from_raw(x_ptr.cast::<[u32; 3]>());
    ///     assert_eq!(&*x, &[1, 2, 3]);
    /// }
    /// ```
    #[inline]
    #[stable(feature = "rc_raw", since = "1.17.0")]
    pub unsafe fn from_raw(ptr: *const T) -> Self {
        unsafe { Arc::from_raw_in(ptr, Global) }
    }

    /// Consumes the `Arc`, returning the wrapped pointer.
    ///
    /// To avoid a memory leak the pointer must be converted back to an `Arc` using
    /// [`Arc::from_raw`].
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new("hello".to_owned());
    /// let x_ptr = Arc::into_raw(x);
    /// assert_eq!(unsafe { &*x_ptr }, "hello");
    /// # // Prevent leaks for Miri.
    /// # drop(unsafe { Arc::from_raw(x_ptr) });
    /// ```
    #[must_use = "losing the pointer will leak memory"]
    #[stable(feature = "rc_raw", since = "1.17.0")]
    #[rustc_never_returns_null_ptr]
    pub fn into_raw(this: Self) -> *const T {
        let this = ManuallyDrop::new(this);
        Self::as_ptr(&*this)
    }

    /// Increments the strong reference count on the `Arc<T>` associated with the
    /// provided pointer by one.
    ///
    /// # Safety
    ///
    /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
    /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
    /// The associated `Arc` instance must be valid (i.e. the strong count must be at
    /// least 1) for the duration of this method, and `ptr` must point to a block of memory
    /// allocated by the global allocator.
    ///
    /// [from_raw_in]: Arc::from_raw_in
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// unsafe {
    ///     let ptr = Arc::into_raw(five);
    ///     Arc::increment_strong_count(ptr);
    ///
    ///     // This assertion is deterministic because we haven't shared
    ///     // the `Arc` between threads.
    ///     let five = Arc::from_raw(ptr);
    ///     assert_eq!(2, Arc::strong_count(&five));
    /// #   // Prevent leaks for Miri.
    /// #   Arc::decrement_strong_count(ptr);
    /// }
    /// ```
    #[inline]
    #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
    pub unsafe fn increment_strong_count(ptr: *const T) {
        unsafe { Arc::increment_strong_count_in(ptr, Global) }
    }

    /// Decrements the strong reference count on the `Arc<T>` associated with the
    /// provided pointer by one.
    ///
    /// # Safety
    ///
    /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
    /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
    /// The associated `Arc` instance must be valid (i.e. the strong count must be at
    /// least 1) when invoking this method, and `ptr` must point to a block of memory
    /// allocated by the global allocator. This method can be used to release the final
    /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been
    /// released.
    ///
    /// [from_raw_in]: Arc::from_raw_in
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
1543    ///
1544    /// unsafe {
1545    ///     let ptr = Arc::into_raw(five);
1546    ///     Arc::increment_strong_count(ptr);
1547    ///
1548    ///     // Those assertions are deterministic because we haven't shared
1549    ///     // the `Arc` between threads.
1550    ///     let five = Arc::from_raw(ptr);
1551    ///     assert_eq!(2, Arc::strong_count(&five));
1552    ///     Arc::decrement_strong_count(ptr);
1553    ///     assert_eq!(1, Arc::strong_count(&five));
1554    /// }
1555    /// ```
1556    #[inline]
1557    #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
1558    pub unsafe fn decrement_strong_count(ptr: *const T) {
1559        unsafe { Arc::decrement_strong_count_in(ptr, Global) }
1560    }
1561}
1562
1563impl<T: ?Sized, A: Allocator> Arc<T, A> {
1564    /// Returns a reference to the underlying allocator.
1565    ///
1566    /// Note: this is an associated function, which means that you have
1567    /// to call it as `Arc::allocator(&a)` instead of `a.allocator()`. This
1568    /// is so that there is no conflict with a method on the inner type.
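    ///
    /// # Examples
    ///
    /// A minimal sketch of the associated-function call style (relying on the
    /// unstable `allocator_api` feature):
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let a = Arc::new_in("hello".to_owned(), System);
    /// let _alloc: &System = Arc::allocator(&a);
    /// ```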
1569    #[inline]
1570    #[unstable(feature = "allocator_api", issue = "32838")]
1571    pub fn allocator(this: &Self) -> &A {
1572        &this.alloc
1573    }
1574
1575    /// Consumes the `Arc`, returning the wrapped pointer and allocator.
1576    ///
1577    /// To avoid a memory leak the pointer must be converted back to an `Arc` using
1578    /// [`Arc::from_raw_in`].
1579    ///
1580    /// # Examples
1581    ///
1582    /// ```
1583    /// #![feature(allocator_api)]
1584    /// use std::sync::Arc;
1585    /// use std::alloc::System;
1586    ///
1587    /// let x = Arc::new_in("hello".to_owned(), System);
1588    /// let (ptr, alloc) = Arc::into_raw_with_allocator(x);
1589    /// assert_eq!(unsafe { &*ptr }, "hello");
1590    /// let x = unsafe { Arc::from_raw_in(ptr, alloc) };
1591    /// assert_eq!(&*x, "hello");
1592    /// ```
1593    #[must_use = "losing the pointer will leak memory"]
1594    #[unstable(feature = "allocator_api", issue = "32838")]
1595    pub fn into_raw_with_allocator(this: Self) -> (*const T, A) {
1596        let this = mem::ManuallyDrop::new(this);
1597        let ptr = Self::as_ptr(&this);
1598        // Safety: `this` is ManuallyDrop so the allocator will not be double-dropped
1599        let alloc = unsafe { ptr::read(&this.alloc) };
1600        (ptr, alloc)
1601    }
1602
1603    /// Provides a raw pointer to the data.
1604    ///
1605    /// The counts are not affected in any way and the `Arc` is not consumed. The pointer is valid for
    /// as long as there are strong references to the allocation.
1607    ///
1608    /// # Examples
1609    ///
1610    /// ```
1611    /// use std::sync::Arc;
1612    ///
1613    /// let x = Arc::new("hello".to_owned());
1614    /// let y = Arc::clone(&x);
1615    /// let x_ptr = Arc::as_ptr(&x);
1616    /// assert_eq!(x_ptr, Arc::as_ptr(&y));
1617    /// assert_eq!(unsafe { &*x_ptr }, "hello");
1618    /// ```
1619    #[must_use]
1620    #[stable(feature = "rc_as_ptr", since = "1.45.0")]
1621    #[rustc_never_returns_null_ptr]
1622    pub fn as_ptr(this: &Self) -> *const T {
1623        let ptr: *mut ArcInner<T> = NonNull::as_ptr(this.ptr);
1624
1625        // SAFETY: This cannot go through Deref::deref or RcInnerPtr::inner because
1626        // this is required to retain raw/mut provenance such that e.g. `get_mut` can
        // write through the pointer after the Arc is recovered through `from_raw`.
1628        unsafe { &raw mut (*ptr).data }
1629    }
1630
1631    /// Constructs an `Arc<T, A>` from a raw pointer.
1632    ///
1633    /// The raw pointer must have been previously returned by a call to [`Arc<U,
1634    /// A>::into_raw`][into_raw] with the following requirements:
1635    ///
1636    /// * If `U` is sized, it must have the same size and alignment as `T`. This
1637    ///   is trivially true if `U` is `T`.
1638    /// * If `U` is unsized, its data pointer must have the same size and
1639    ///   alignment as `T`. This is trivially true if `Arc<U>` was constructed
1640    ///   through `Arc<T>` and then converted to `Arc<U>` through an [unsized
1641    ///   coercion].
1642    ///
1643    /// Note that if `U` or `U`'s data pointer is not `T` but has the same size
1644    /// and alignment, this is basically like transmuting references of
1645    /// different types. See [`mem::transmute`][transmute] for more information
1646    /// on what restrictions apply in this case.
1647    ///
    /// The raw pointer must point to a block of memory allocated by `alloc`.
1649    ///
1650    /// The user of `from_raw` has to make sure a specific value of `T` is only
1651    /// dropped once.
1652    ///
1653    /// This function is unsafe because improper use may lead to memory unsafety,
1654    /// even if the returned `Arc<T>` is never accessed.
1655    ///
1656    /// [into_raw]: Arc::into_raw
1657    /// [transmute]: core::mem::transmute
1658    /// [unsized coercion]: https://doc.rust-lang.org/reference/type-coercions.html#unsized-coercions
1659    ///
1660    /// # Examples
1661    ///
1662    /// ```
1663    /// #![feature(allocator_api)]
1664    ///
1665    /// use std::sync::Arc;
1666    /// use std::alloc::System;
1667    ///
1668    /// let x = Arc::new_in("hello".to_owned(), System);
1669    /// let (x_ptr, alloc) = Arc::into_raw_with_allocator(x);
1670    ///
1671    /// unsafe {
1672    ///     // Convert back to an `Arc` to prevent leak.
1673    ///     let x = Arc::from_raw_in(x_ptr, System);
1674    ///     assert_eq!(&*x, "hello");
1675    ///
    ///     // Further calls to `Arc::from_raw_in(x_ptr, System)` would be memory-unsafe.
1677    /// }
1678    ///
1679    /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
1680    /// ```
1681    ///
1682    /// Convert a slice back into its original array:
1683    ///
1684    /// ```
1685    /// #![feature(allocator_api)]
1686    ///
1687    /// use std::sync::Arc;
1688    /// use std::alloc::System;
1689    ///
1690    /// let x: Arc<[u32], _> = Arc::new_in([1, 2, 3], System);
1691    /// let x_ptr: *const [u32] = Arc::into_raw_with_allocator(x).0;
1692    ///
1693    /// unsafe {
1694    ///     let x: Arc<[u32; 3], _> = Arc::from_raw_in(x_ptr.cast::<[u32; 3]>(), System);
1695    ///     assert_eq!(&*x, &[1, 2, 3]);
1696    /// }
1697    /// ```
1698    #[inline]
1699    #[unstable(feature = "allocator_api", issue = "32838")]
1700    pub unsafe fn from_raw_in(ptr: *const T, alloc: A) -> Self {
1701        unsafe {
1702            let offset = data_offset(ptr);
1703
1704            // Reverse the offset to find the original ArcInner.
1705            let arc_ptr = ptr.byte_sub(offset) as *mut ArcInner<T>;
1706
1707            Self::from_ptr_in(arc_ptr, alloc)
1708        }
1709    }
1710
1711    /// Creates a new [`Weak`] pointer to this allocation.
1712    ///
1713    /// # Examples
1714    ///
1715    /// ```
1716    /// use std::sync::Arc;
1717    ///
1718    /// let five = Arc::new(5);
1719    ///
1720    /// let weak_five = Arc::downgrade(&five);
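    ///
    /// // As long as the value is still alive, the weak pointer
    /// // can be upgraded back into a strong one:
    /// assert_eq!(*weak_five.upgrade().unwrap(), 5);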
1721    /// ```
1722    #[must_use = "this returns a new `Weak` pointer, \
1723                  without modifying the original `Arc`"]
1724    #[stable(feature = "arc_weak", since = "1.4.0")]
1725    pub fn downgrade(this: &Self) -> Weak<T, A>
1726    where
1727        A: Clone,
1728    {
1729        // This Relaxed is OK because we're checking the value in the CAS
1730        // below.
1731        let mut cur = this.inner().weak.load(Relaxed);
1732
1733        loop {
1734            // check if the weak counter is currently "locked"; if so, spin.
1735            if cur == usize::MAX {
1736                hint::spin_loop();
1737                cur = this.inner().weak.load(Relaxed);
1738                continue;
1739            }
1740
1741            // We can't allow the refcount to increase much past `MAX_REFCOUNT`.
1742            assert!(cur <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
1743
1744            // NOTE: this code currently ignores the possibility of overflow
1745            // into usize::MAX; in general both Rc and Arc need to be adjusted
1746            // to deal with overflow.
1747
1748            // Unlike with Clone(), we need this to be an Acquire read to
1749            // synchronize with the write coming from `is_unique`, so that the
1750            // events prior to that write happen before this read.
1751            match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) {
1752                Ok(_) => {
1753                    // Make sure we do not create a dangling Weak
1754                    debug_assert!(!is_dangling(this.ptr.as_ptr()));
1755                    return Weak { ptr: this.ptr, alloc: this.alloc.clone() };
1756                }
1757                Err(old) => cur = old,
1758            }
1759        }
1760    }
1761
1762    /// Gets the number of [`Weak`] pointers to this allocation.
1763    ///
1764    /// # Safety
1765    ///
1766    /// This method by itself is safe, but using it correctly requires extra care.
1767    /// Another thread can change the weak count at any time,
1768    /// including potentially between calling this method and acting on the result.
1769    ///
1770    /// # Examples
1771    ///
1772    /// ```
1773    /// use std::sync::Arc;
1774    ///
1775    /// let five = Arc::new(5);
1776    /// let _weak_five = Arc::downgrade(&five);
1777    ///
1778    /// // This assertion is deterministic because we haven't shared
1779    /// // the `Arc` or `Weak` between threads.
1780    /// assert_eq!(1, Arc::weak_count(&five));
1781    /// ```
1782    #[inline]
1783    #[must_use]
1784    #[stable(feature = "arc_counts", since = "1.15.0")]
1785    pub fn weak_count(this: &Self) -> usize {
1786        let cnt = this.inner().weak.load(Relaxed);
1787        // If the weak count is currently locked, the value of the
1788        // count was 0 just before taking the lock.
1789        if cnt == usize::MAX { 0 } else { cnt - 1 }
1790    }
1791
1792    /// Gets the number of strong (`Arc`) pointers to this allocation.
1793    ///
1794    /// # Safety
1795    ///
1796    /// This method by itself is safe, but using it correctly requires extra care.
1797    /// Another thread can change the strong count at any time,
1798    /// including potentially between calling this method and acting on the result.
1799    ///
1800    /// # Examples
1801    ///
1802    /// ```
1803    /// use std::sync::Arc;
1804    ///
1805    /// let five = Arc::new(5);
1806    /// let _also_five = Arc::clone(&five);
1807    ///
1808    /// // This assertion is deterministic because we haven't shared
1809    /// // the `Arc` between threads.
1810    /// assert_eq!(2, Arc::strong_count(&five));
1811    /// ```
1812    #[inline]
1813    #[must_use]
1814    #[stable(feature = "arc_counts", since = "1.15.0")]
1815    pub fn strong_count(this: &Self) -> usize {
1816        this.inner().strong.load(Relaxed)
1817    }
1818
1819    /// Increments the strong reference count on the `Arc<T>` associated with the
1820    /// provided pointer by one.
1821    ///
1822    /// # Safety
1823    ///
1824    /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
1825    /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
1826    /// The associated `Arc` instance must be valid (i.e. the strong count must be at
1827    /// least 1) for the duration of this method, and `ptr` must point to a block of memory
1828    /// allocated by `alloc`.
1829    ///
1830    /// [from_raw_in]: Arc::from_raw_in
1831    ///
1832    /// # Examples
1833    ///
1834    /// ```
1835    /// #![feature(allocator_api)]
1836    ///
1837    /// use std::sync::Arc;
1838    /// use std::alloc::System;
1839    ///
1840    /// let five = Arc::new_in(5, System);
1841    ///
1842    /// unsafe {
1843    ///     let (ptr, _alloc) = Arc::into_raw_with_allocator(five);
1844    ///     Arc::increment_strong_count_in(ptr, System);
1845    ///
1846    ///     // This assertion is deterministic because we haven't shared
1847    ///     // the `Arc` between threads.
1848    ///     let five = Arc::from_raw_in(ptr, System);
1849    ///     assert_eq!(2, Arc::strong_count(&five));
1850    /// #   // Prevent leaks for Miri.
1851    /// #   Arc::decrement_strong_count_in(ptr, System);
1852    /// }
1853    /// ```
1854    #[inline]
1855    #[unstable(feature = "allocator_api", issue = "32838")]
1856    pub unsafe fn increment_strong_count_in(ptr: *const T, alloc: A)
1857    where
1858        A: Clone,
1859    {
1860        // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
1861        let arc = unsafe { mem::ManuallyDrop::new(Arc::from_raw_in(ptr, alloc)) };
1862        // Now increase refcount, but don't drop new refcount either
1863        let _arc_clone: mem::ManuallyDrop<_> = arc.clone();
1864    }
1865
1866    /// Decrements the strong reference count on the `Arc<T>` associated with the
1867    /// provided pointer by one.
1868    ///
1869    /// # Safety
1870    ///
1871    /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
1872    /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
1873    /// The associated `Arc` instance must be valid (i.e. the strong count must be at
1874    /// least 1) when invoking this method, and `ptr` must point to a block of memory
1875    /// allocated by `alloc`. This method can be used to release the final
1876    /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been
1877    /// released.
1878    ///
1879    /// [from_raw_in]: Arc::from_raw_in
1880    ///
1881    /// # Examples
1882    ///
1883    /// ```
1884    /// #![feature(allocator_api)]
1885    ///
1886    /// use std::sync::Arc;
1887    /// use std::alloc::System;
1888    ///
1889    /// let five = Arc::new_in(5, System);
1890    ///
1891    /// unsafe {
1892    ///     let (ptr, _alloc) = Arc::into_raw_with_allocator(five);
1893    ///     Arc::increment_strong_count_in(ptr, System);
1894    ///
1895    ///     // Those assertions are deterministic because we haven't shared
1896    ///     // the `Arc` between threads.
1897    ///     let five = Arc::from_raw_in(ptr, System);
1898    ///     assert_eq!(2, Arc::strong_count(&five));
1899    ///     Arc::decrement_strong_count_in(ptr, System);
1900    ///     assert_eq!(1, Arc::strong_count(&five));
1901    /// }
1902    /// ```
1903    #[inline]
1904    #[unstable(feature = "allocator_api", issue = "32838")]
1905    pub unsafe fn decrement_strong_count_in(ptr: *const T, alloc: A) {
1906        unsafe { drop(Arc::from_raw_in(ptr, alloc)) };
1907    }
1908
1909    #[inline]
1910    fn inner(&self) -> &ArcInner<T> {
1911        // This unsafety is ok because while this arc is alive we're guaranteed
1912        // that the inner pointer is valid. Furthermore, we know that the
1913        // `ArcInner` structure itself is `Sync` because the inner data is
1914        // `Sync` as well, so we're ok loaning out an immutable pointer to these
1915        // contents.
1916        unsafe { self.ptr.as_ref() }
1917    }
1918
1919    // Non-inlined part of `drop`.
1920    #[inline(never)]
1921    unsafe fn drop_slow(&mut self) {
1922        // Drop the weak ref collectively held by all strong references when this
1923        // variable goes out of scope. This ensures that the memory is deallocated
1924        // even if the destructor of `T` panics.
1925        // Take a reference to `self.alloc` instead of cloning because 1. it'll last long
1926        // enough, and 2. you should be able to drop `Arc`s with unclonable allocators
1927        let _weak = Weak { ptr: self.ptr, alloc: &self.alloc };
1928
1929        // Destroy the data at this time, even though we must not free the box
1930        // allocation itself (there might still be weak pointers lying around).
1931        // We cannot use `get_mut_unchecked` here, because `self.alloc` is borrowed.
1932        unsafe { ptr::drop_in_place(&mut (*self.ptr.as_ptr()).data) };
1933    }
1934
    /// Returns `true` if the two `Arc`s point to the same allocation, in a vein similar to
    /// [`ptr::eq`]. This function ignores the metadata of `dyn Trait` pointers.
1937    ///
1938    /// # Examples
1939    ///
1940    /// ```
1941    /// use std::sync::Arc;
1942    ///
1943    /// let five = Arc::new(5);
1944    /// let same_five = Arc::clone(&five);
1945    /// let other_five = Arc::new(5);
1946    ///
1947    /// assert!(Arc::ptr_eq(&five, &same_five));
1948    /// assert!(!Arc::ptr_eq(&five, &other_five));
1949    /// ```
1950    ///
1951    /// [`ptr::eq`]: core::ptr::eq "ptr::eq"
1952    #[inline]
1953    #[must_use]
1954    #[stable(feature = "ptr_eq", since = "1.17.0")]
1955    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
1956        ptr::addr_eq(this.ptr.as_ptr(), other.ptr.as_ptr())
1957    }
1958}
1959
1960impl<T: ?Sized> Arc<T> {
1961    /// Allocates an `ArcInner<T>` with sufficient space for
1962    /// a possibly-unsized inner value where the value has the layout provided.
1963    ///
1964    /// The function `mem_to_arcinner` is called with the data pointer
    /// and must return a (potentially fat) pointer to the `ArcInner<T>`.
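    ///
    /// For instance, the slice constructors later in this file invoke it roughly
    /// as sketched below (marked `ignore` since the items involved are private):
    ///
    /// ```ignore (private-api)
    /// let inner = Arc::<[T]>::allocate_for_layout(
    ///     Layout::array::<T>(len).unwrap(),
    ///     |layout| Global.allocate(layout),
    ///     |mem| ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut ArcInner<[T]>,
    /// );
    /// ```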
1966    #[cfg(not(no_global_oom_handling))]
1967    unsafe fn allocate_for_layout(
1968        value_layout: Layout,
1969        allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
1970        mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
1971    ) -> *mut ArcInner<T> {
1972        let layout = arcinner_layout_for_value_layout(value_layout);
1973
1974        let ptr = allocate(layout).unwrap_or_else(|_| handle_alloc_error(layout));
1975
1976        unsafe { Self::initialize_arcinner(ptr, layout, mem_to_arcinner) }
1977    }
1978
1979    /// Allocates an `ArcInner<T>` with sufficient space for
1980    /// a possibly-unsized inner value where the value has the layout provided,
1981    /// returning an error if allocation fails.
1982    ///
1983    /// The function `mem_to_arcinner` is called with the data pointer
    /// and must return a (potentially fat) pointer to the `ArcInner<T>`.
1985    unsafe fn try_allocate_for_layout(
1986        value_layout: Layout,
1987        allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
1988        mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
1989    ) -> Result<*mut ArcInner<T>, AllocError> {
1990        let layout = arcinner_layout_for_value_layout(value_layout);
1991
1992        let ptr = allocate(layout)?;
1993
1994        let inner = unsafe { Self::initialize_arcinner(ptr, layout, mem_to_arcinner) };
1995
1996        Ok(inner)
1997    }
1998
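    /// Writes the initial reference counts (one strong count, plus the implicit
    /// weak count collectively held by all strong references) into the freshly
    /// allocated block, and returns the `ArcInner<T>` pointer produced by
    /// `mem_to_arcinner`.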
1999    unsafe fn initialize_arcinner(
2000        ptr: NonNull<[u8]>,
2001        layout: Layout,
2002        mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
2003    ) -> *mut ArcInner<T> {
2004        let inner = mem_to_arcinner(ptr.as_non_null_ptr().as_ptr());
2005        debug_assert_eq!(unsafe { Layout::for_value_raw(inner) }, layout);
2006
2007        unsafe {
2008            (&raw mut (*inner).strong).write(atomic::AtomicUsize::new(1));
2009            (&raw mut (*inner).weak).write(atomic::AtomicUsize::new(1));
2010        }
2011
2012        inner
2013    }
2014}
2015
2016impl<T: ?Sized, A: Allocator> Arc<T, A> {
2017    /// Allocates an `ArcInner<T>` with sufficient space for an unsized inner value.
2018    #[inline]
2019    #[cfg(not(no_global_oom_handling))]
2020    unsafe fn allocate_for_ptr_in(ptr: *const T, alloc: &A) -> *mut ArcInner<T> {
2021        // Allocate for the `ArcInner<T>` using the given value.
2022        unsafe {
2023            Arc::allocate_for_layout(
2024                Layout::for_value_raw(ptr),
2025                |layout| alloc.allocate(layout),
2026                |mem| mem.with_metadata_of(ptr as *const ArcInner<T>),
2027            )
2028        }
2029    }
2030
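    /// Moves the value out of a `Box<T, A>` into a new `Arc<T, A>` in the same
    /// allocator, copying the value byte-wise and then freeing the box's
    /// allocation without running `T`'s destructor.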
2031    #[cfg(not(no_global_oom_handling))]
2032    fn from_box_in(src: Box<T, A>) -> Arc<T, A> {
2033        unsafe {
2034            let value_size = size_of_val(&*src);
2035            let ptr = Self::allocate_for_ptr_in(&*src, Box::allocator(&src));
2036
2037            // Copy value as bytes
2038            ptr::copy_nonoverlapping(
2039                (&raw const *src) as *const u8,
2040                (&raw mut (*ptr).data) as *mut u8,
2041                value_size,
2042            );
2043
2044            // Free the allocation without dropping its contents
2045            let (bptr, alloc) = Box::into_raw_with_allocator(src);
2046            let src = Box::from_raw_in(bptr as *mut mem::ManuallyDrop<T>, alloc.by_ref());
2047            drop(src);
2048
2049            Self::from_ptr_in(ptr, alloc)
2050        }
2051    }
2052}
2053
2054impl<T> Arc<[T]> {
2055    /// Allocates an `ArcInner<[T]>` with the given length.
2056    #[cfg(not(no_global_oom_handling))]
2057    unsafe fn allocate_for_slice(len: usize) -> *mut ArcInner<[T]> {
2058        unsafe {
2059            Self::allocate_for_layout(
2060                Layout::array::<T>(len).unwrap(),
2061                |layout| Global.allocate(layout),
2062                |mem| ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut ArcInner<[T]>,
2063            )
2064        }
2065    }
2066
2067    /// Copy elements from slice into newly allocated `Arc<[T]>`
2068    ///
2069    /// Unsafe because the caller must either take ownership or bind `T: Copy`.
2070    #[cfg(not(no_global_oom_handling))]
2071    unsafe fn copy_from_slice(v: &[T]) -> Arc<[T]> {
2072        unsafe {
2073            let ptr = Self::allocate_for_slice(v.len());
2074
2075            ptr::copy_nonoverlapping(v.as_ptr(), (&raw mut (*ptr).data) as *mut T, v.len());
2076
2077            Self::from_ptr(ptr)
2078        }
2079    }
2080
2081    /// Constructs an `Arc<[T]>` from an iterator known to be of a certain size.
2082    ///
2083    /// Behavior is undefined should the size be wrong.
2084    #[cfg(not(no_global_oom_handling))]
2085    unsafe fn from_iter_exact(iter: impl Iterator<Item = T>, len: usize) -> Arc<[T]> {
2086        // Panic guard while cloning T elements.
2087        // In the event of a panic, elements that have been written
2088        // into the new ArcInner will be dropped, then the memory freed.
2089        struct Guard<T> {
2090            mem: NonNull<u8>,
2091            elems: *mut T,
2092            layout: Layout,
2093            n_elems: usize,
2094        }
2095
2096        impl<T> Drop for Guard<T> {
2097            fn drop(&mut self) {
2098                unsafe {
2099                    let slice = from_raw_parts_mut(self.elems, self.n_elems);
2100                    ptr::drop_in_place(slice);
2101
2102                    Global.deallocate(self.mem, self.layout);
2103                }
2104            }
2105        }
2106
2107        unsafe {
2108            let ptr = Self::allocate_for_slice(len);
2109
2110            let mem = ptr as *mut _ as *mut u8;
2111            let layout = Layout::for_value_raw(ptr);
2112
2113            // Pointer to first element
2114            let elems = (&raw mut (*ptr).data) as *mut T;
2115
2116            let mut guard = Guard { mem: NonNull::new_unchecked(mem), elems, layout, n_elems: 0 };
2117
2118            for (i, item) in iter.enumerate() {
2119                ptr::write(elems.add(i), item);
2120                guard.n_elems += 1;
2121            }
2122
2123            // All clear. Forget the guard so it doesn't free the new ArcInner.
2124            mem::forget(guard);
2125
2126            Self::from_ptr(ptr)
2127        }
2128    }
2129}
2130
2131impl<T, A: Allocator> Arc<[T], A> {
2132    /// Allocates an `ArcInner<[T]>` with the given length.
2133    #[inline]
2134    #[cfg(not(no_global_oom_handling))]
2135    unsafe fn allocate_for_slice_in(len: usize, alloc: &A) -> *mut ArcInner<[T]> {
2136        unsafe {
2137            Arc::allocate_for_layout(
2138                Layout::array::<T>(len).unwrap(),
2139                |layout| alloc.allocate(layout),
2140                |mem| ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut ArcInner<[T]>,
2141            )
2142        }
2143    }
2144}
2145
2146/// Specialization trait used for `From<&[T]>`.
2147#[cfg(not(no_global_oom_handling))]
2148trait ArcFromSlice<T> {
2149    fn from_slice(slice: &[T]) -> Self;
2150}
2151
2152#[cfg(not(no_global_oom_handling))]
2153impl<T: Clone> ArcFromSlice<T> for Arc<[T]> {
2154    #[inline]
2155    default fn from_slice(v: &[T]) -> Self {
2156        unsafe { Self::from_iter_exact(v.iter().cloned(), v.len()) }
2157    }
2158}
2159
2160#[cfg(not(no_global_oom_handling))]
2161impl<T: Copy> ArcFromSlice<T> for Arc<[T]> {
2162    #[inline]
2163    fn from_slice(v: &[T]) -> Self {
2164        unsafe { Arc::copy_from_slice(v) }
2165    }
2166}
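
// An illustration of the specialization above (a sketch, not exercised here):
// the `From<&[T]>` impl for `Arc<[T]>` goes through `ArcFromSlice::from_slice`,
// so `Copy` element types take the `copy_from_slice` memcpy fast path, while
// merely `Clone` types fall back to the element-wise `from_iter_exact` path:
//
//     let cloned: Arc<[String]> = Arc::from(&["a".to_string()][..]); // Clone path
//     let copied: Arc<[u32]>    = Arc::from(&[1, 2, 3][..]);         // Copy path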
2167
2168#[stable(feature = "rust1", since = "1.0.0")]
2169impl<T: ?Sized, A: Allocator + Clone> Clone for Arc<T, A> {
2170    /// Makes a clone of the `Arc` pointer.
2171    ///
2172    /// This creates another pointer to the same allocation, increasing the
2173    /// strong reference count.
2174    ///
2175    /// # Examples
2176    ///
2177    /// ```
2178    /// use std::sync::Arc;
2179    ///
2180    /// let five = Arc::new(5);
2181    ///
2182    /// let _ = Arc::clone(&five);
2183    /// ```
2184    #[inline]
2185    fn clone(&self) -> Arc<T, A> {
2186        // Using a relaxed ordering is alright here, as knowledge of the
2187        // original reference prevents other threads from erroneously deleting
2188        // the object.
2189        //
        // As explained in the [Boost documentation][1], increasing the
2191        // reference counter can always be done with memory_order_relaxed: New
2192        // references to an object can only be formed from an existing
2193        // reference, and passing an existing reference from one thread to
2194        // another must already provide any required synchronization.
2195        //
2196        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
2197        let old_size = self.inner().strong.fetch_add(1, Relaxed);
2198
2199        // However we need to guard against massive refcounts in case someone is `mem::forget`ing
        // Arcs. If we don't do this, the count can overflow and users will use-after-free. This
2201        // branch will never be taken in any realistic program. We abort because such a program is
2202        // incredibly degenerate, and we don't care to support it.
2203        //
2204        // This check is not 100% water-proof: we error when the refcount grows beyond `isize::MAX`.
2205        // But we do that check *after* having done the increment, so there is a chance here that
2206        // the worst already happened and we actually do overflow the `usize` counter. However, that
2207        // requires the counter to grow from `isize::MAX` to `usize::MAX` between the increment
2208        // above and the `abort` below, which seems exceedingly unlikely.
2209        //
2210        // This is a global invariant, and also applies when using a compare-exchange loop to increment
2211        // counters in other methods.
2212        // Otherwise, the counter could be brought to an almost-overflow using a compare-exchange loop,
2213        // and then overflow using a few `fetch_add`s.
2214        if old_size > MAX_REFCOUNT {
2215            abort();
2216        }
2217
2218        unsafe { Self::from_inner_in(self.ptr, self.alloc.clone()) }
2219    }
2220}
2221
2222#[unstable(feature = "ergonomic_clones", issue = "132290")]
2223impl<T: ?Sized, A: Allocator + Clone> UseCloned for Arc<T, A> {}
2224
2225#[stable(feature = "rust1", since = "1.0.0")]
2226impl<T: ?Sized, A: Allocator> Deref for Arc<T, A> {
2227    type Target = T;
2228
2229    #[inline]
2230    fn deref(&self) -> &T {
2231        &self.inner().data
2232    }
2233}
2234
2235#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")]
2236unsafe impl<T: ?Sized, A: Allocator> PinCoerceUnsized for Arc<T, A> {}
2237
2238#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")]
2239unsafe impl<T: ?Sized, A: Allocator> PinCoerceUnsized for Weak<T, A> {}
2240
2241#[unstable(feature = "deref_pure_trait", issue = "87121")]
2242unsafe impl<T: ?Sized, A: Allocator> DerefPure for Arc<T, A> {}
2243
2244#[unstable(feature = "legacy_receiver_trait", issue = "none")]
2245impl<T: ?Sized> LegacyReceiver for Arc<T> {}
2246
2247#[cfg(not(no_global_oom_handling))]
2248impl<T: ?Sized + CloneToUninit, A: Allocator + Clone> Arc<T, A> {
2249    /// Makes a mutable reference into the given `Arc`.
2250    ///
2251    /// If there are other `Arc` pointers to the same allocation, then `make_mut` will
2252    /// [`clone`] the inner value to a new allocation to ensure unique ownership.  This is also
2253    /// referred to as clone-on-write.
2254    ///
2255    /// However, if there are no other `Arc` pointers to this allocation, but some [`Weak`]
2256    /// pointers, then the [`Weak`] pointers will be dissociated and the inner value will not
2257    /// be cloned.
2258    ///
2259    /// See also [`get_mut`], which will fail rather than cloning the inner value
2260    /// or dissociating [`Weak`] pointers.
2261    ///
2262    /// [`clone`]: Clone::clone
2263    /// [`get_mut`]: Arc::get_mut
2264    ///
2265    /// # Examples
2266    ///
2267    /// ```
2268    /// use std::sync::Arc;
2269    ///
2270    /// let mut data = Arc::new(5);
2271    ///
2272    /// *Arc::make_mut(&mut data) += 1;         // Won't clone anything
2273    /// let mut other_data = Arc::clone(&data); // Won't clone inner data
2274    /// *Arc::make_mut(&mut data) += 1;         // Clones inner data
2275    /// *Arc::make_mut(&mut data) += 1;         // Won't clone anything
2276    /// *Arc::make_mut(&mut other_data) *= 2;   // Won't clone anything
2277    ///
2278    /// // Now `data` and `other_data` point to different allocations.
2279    /// assert_eq!(*data, 8);
2280    /// assert_eq!(*other_data, 12);
2281    /// ```
2282    ///
2283    /// [`Weak`] pointers will be dissociated:
2284    ///
2285    /// ```
2286    /// use std::sync::Arc;
2287    ///
2288    /// let mut data = Arc::new(75);
2289    /// let weak = Arc::downgrade(&data);
2290    ///
2291    /// assert!(75 == *data);
2292    /// assert!(75 == *weak.upgrade().unwrap());
2293    ///
2294    /// *Arc::make_mut(&mut data) += 1;
2295    ///
2296    /// assert!(76 == *data);
2297    /// assert!(weak.upgrade().is_none());
2298    /// ```
2299    #[inline]
2300    #[stable(feature = "arc_unique", since = "1.4.0")]
2301    pub fn make_mut(this: &mut Self) -> &mut T {
2302        let size_of_val = size_of_val::<T>(&**this);
2303
2304        // Note that we hold both a strong reference and a weak reference.
2305        // Thus, releasing our strong reference only will not, by itself, cause
2306        // the memory to be deallocated.
2307        //
2308        // Use Acquire to ensure that we see any writes to `weak` that happen
2309        // before release writes (i.e., decrements) to `strong`. Since we hold a
2310        // weak count, there's no chance the ArcInner itself could be
2311        // deallocated.
2312        if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() {
2313            // Another strong pointer exists, so we must clone.
2314
2315            let this_data_ref: &T = &**this;
2316            // `in_progress` drops the allocation if we panic before finishing initializing it.
2317            let mut in_progress: UniqueArcUninit<T, A> =
2318                UniqueArcUninit::new(this_data_ref, this.alloc.clone());
2319
2320            let initialized_clone = unsafe {
2321                // Clone. If the clone panics, `in_progress` will be dropped and clean up.
2322                this_data_ref.clone_to_uninit(in_progress.data_ptr().cast());
2323                // Cast type of pointer, now that it is initialized.
2324                in_progress.into_arc()
2325            };
2326            *this = initialized_clone;
2327        } else if this.inner().weak.load(Relaxed) != 1 {
2328            // Relaxed suffices in the above because this is fundamentally an
2329            // optimization: we are always racing with weak pointers being
            // dropped. Worst case, we end up allocating a new Arc unnecessarily.
2331
2332            // We removed the last strong ref, but there are additional weak
2333            // refs remaining. We'll move the contents to a new Arc, and
2334            // invalidate the other weak refs.
2335
2336            // Note that it is not possible for the read of `weak` to yield
2337            // usize::MAX (i.e., locked), since the weak count can only be
2338            // locked by a thread with a strong reference.
2339
2340            // Materialize our own implicit weak pointer, so that it can clean
2341            // up the ArcInner as needed.
2342            let _weak = Weak { ptr: this.ptr, alloc: this.alloc.clone() };
2343
2344            // Can just steal the data, all that's left is Weaks
2345            //
2346            // We don't need panic-protection like the above branch does, but we might as well
2347            // use the same mechanism.
2348            let mut in_progress: UniqueArcUninit<T, A> =
2349                UniqueArcUninit::new(&**this, this.alloc.clone());
2350            unsafe {
2351                // Initialize `in_progress` with move of **this.
2352                // We have to express this in terms of bytes because `T: ?Sized`; there is no
2353                // operation that just copies a value based on its `size_of_val()`.
2354                ptr::copy_nonoverlapping(
2355                    ptr::from_ref(&**this).cast::<u8>(),
2356                    in_progress.data_ptr().cast::<u8>(),
2357                    size_of_val,
2358                );
2359
2360                ptr::write(this, in_progress.into_arc());
2361            }
2362        } else {
2363            // We were the sole reference of either kind; bump back up the
2364            // strong ref count.
2365            this.inner().strong.store(1, Release);
2366        }
2367
2368        // As with `get_mut()`, the unsafety is ok because our reference was
2369        // either unique to begin with, or became one upon cloning the contents.
2370        unsafe { Self::get_mut_unchecked(this) }
2371    }
2372}
2373
2374impl<T: Clone, A: Allocator> Arc<T, A> {
2375    /// If we have the only reference to `T` then unwrap it. Otherwise, clone `T` and return the
2376    /// clone.
2377    ///
2378    /// Assuming `arc_t` is of type `Arc<T>`, this function is functionally equivalent to
2379    /// `(*arc_t).clone()`, but will avoid cloning the inner value where possible.
2380    ///
2381    /// # Examples
2382    ///
2383    /// ```
2384    /// # use std::{ptr, sync::Arc};
2385    /// let inner = String::from("test");
2386    /// let ptr = inner.as_ptr();
2387    ///
2388    /// let arc = Arc::new(inner);
2389    /// let inner = Arc::unwrap_or_clone(arc);
2390    /// // The inner value was not cloned
2391    /// assert!(ptr::eq(ptr, inner.as_ptr()));
2392    ///
2393    /// let arc = Arc::new(inner);
2394    /// let arc2 = arc.clone();
2395    /// let inner = Arc::unwrap_or_clone(arc);
2396    /// // Because there were 2 references, we had to clone the inner value.
2397    /// assert!(!ptr::eq(ptr, inner.as_ptr()));
2398    /// // `arc2` is the last reference, so when we unwrap it we get back
2399    /// // the original `String`.
2400    /// let inner = Arc::unwrap_or_clone(arc2);
2401    /// assert!(ptr::eq(ptr, inner.as_ptr()));
2402    /// ```
2403    #[inline]
2404    #[stable(feature = "arc_unwrap_or_clone", since = "1.76.0")]
2405    pub fn unwrap_or_clone(this: Self) -> T {
2406        Arc::try_unwrap(this).unwrap_or_else(|arc| (*arc).clone())
2407    }
2408}
2409
2410impl<T: ?Sized, A: Allocator> Arc<T, A> {
2411    /// Returns a mutable reference into the given `Arc`, if there are
2412    /// no other `Arc` or [`Weak`] pointers to the same allocation.
2413    ///
2414    /// Returns [`None`] otherwise, because it is not safe to
2415    /// mutate a shared value.
2416    ///
2417    /// See also [`make_mut`][make_mut], which will [`clone`][clone]
2418    /// the inner value when there are other `Arc` pointers.
2419    ///
2420    /// [make_mut]: Arc::make_mut
2421    /// [clone]: Clone::clone
2422    ///
2423    /// # Examples
2424    ///
2425    /// ```
2426    /// use std::sync::Arc;
2427    ///
2428    /// let mut x = Arc::new(3);
2429    /// *Arc::get_mut(&mut x).unwrap() = 4;
2430    /// assert_eq!(*x, 4);
2431    ///
2432    /// let _y = Arc::clone(&x);
2433    /// assert!(Arc::get_mut(&mut x).is_none());
2434    /// ```
2435    #[inline]
2436    #[stable(feature = "arc_unique", since = "1.4.0")]
2437    pub fn get_mut(this: &mut Self) -> Option<&mut T> {
2438        if Self::is_unique(this) {
2439            // This unsafety is ok because we're guaranteed that the pointer
2440            // returned is the *only* pointer that will ever be returned to T. Our
2441            // reference count is guaranteed to be 1 at this point, and we required
2442            // the Arc itself to be `mut`, so we're returning the only possible
2443            // reference to the inner data.
2444            unsafe { Some(Arc::get_mut_unchecked(this)) }
2445        } else {
2446            None
2447        }
2448    }
2449
2450    /// Returns a mutable reference into the given `Arc`,
2451    /// without any check.
2452    ///
2453    /// See also [`get_mut`], which is safe and does appropriate checks.
2454    ///
2455    /// [`get_mut`]: Arc::get_mut
2456    ///
2457    /// # Safety
2458    ///
2459    /// If any other `Arc` or [`Weak`] pointers to the same allocation exist, then
2460    /// they must not be dereferenced or have active borrows for the duration
2461    /// of the returned borrow, and their inner type must be exactly the same as the
    /// inner type of this Arc (including lifetimes). This is trivially the case if no
2463    /// such pointers exist, for example immediately after `Arc::new`.
2464    ///
2465    /// # Examples
2466    ///
2467    /// ```
2468    /// #![feature(get_mut_unchecked)]
2469    ///
2470    /// use std::sync::Arc;
2471    ///
2472    /// let mut x = Arc::new(String::new());
2473    /// unsafe {
2474    ///     Arc::get_mut_unchecked(&mut x).push_str("foo")
2475    /// }
2476    /// assert_eq!(*x, "foo");
2477    /// ```
2478    /// Other `Arc` pointers to the same allocation must be to the same type.
2479    /// ```no_run
2480    /// #![feature(get_mut_unchecked)]
2481    ///
2482    /// use std::sync::Arc;
2483    ///
2484    /// let x: Arc<str> = Arc::from("Hello, world!");
2485    /// let mut y: Arc<[u8]> = x.clone().into();
2486    /// unsafe {
2487    ///     // this is Undefined Behavior, because x's inner type is str, not [u8]
2488    ///     Arc::get_mut_unchecked(&mut y).fill(0xff); // 0xff is invalid in UTF-8
2489    /// }
2490    /// println!("{}", &*x); // Invalid UTF-8 in a str
2491    /// ```
2492    /// Other `Arc` pointers to the same allocation must be to the exact same type, including lifetimes.
2493    /// ```no_run
2494    /// #![feature(get_mut_unchecked)]
2495    ///
2496    /// use std::sync::Arc;
2497    ///
2498    /// let x: Arc<&str> = Arc::new("Hello, world!");
2499    /// {
2500    ///     let s = String::from("Oh, no!");
2501    ///     let mut y: Arc<&str> = x.clone();
2502    ///     unsafe {
2503    ///         // this is Undefined Behavior, because x's inner type
2504    ///         // is &'long str, not &'short str
2505    ///         *Arc::get_mut_unchecked(&mut y) = &s;
2506    ///     }
2507    /// }
2508    /// println!("{}", &*x); // Use-after-free
2509    /// ```
2510    #[inline]
2511    #[unstable(feature = "get_mut_unchecked", issue = "63292")]
2512    pub unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T {
2513        // We are careful to *not* create a reference covering the "count" fields, as
2514        // this would alias with concurrent access to the reference counts (e.g. by `Weak`).
2515        unsafe { &mut (*this.ptr.as_ptr()).data }
2516    }
2517
2518    /// Determine whether this is the unique reference to the underlying data.
2519    ///
2520    /// Returns `true` if there are no other `Arc` or [`Weak`] pointers to the same allocation;
2521    /// returns `false` otherwise.
2522    ///
    /// If this function returns `true`, then it is guaranteed to be safe to call [`get_mut_unchecked`]
2524    /// on this `Arc`, so long as no clones occur in between.
2525    ///
2526    /// # Examples
2527    ///
2528    /// ```
2529    /// #![feature(arc_is_unique)]
2530    ///
2531    /// use std::sync::Arc;
2532    ///
2533    /// let x = Arc::new(3);
2534    /// assert!(Arc::is_unique(&x));
2535    ///
2536    /// let y = Arc::clone(&x);
2537    /// assert!(!Arc::is_unique(&x));
2538    /// drop(y);
2539    ///
2540    /// // Weak references also count, because they could be upgraded at any time.
2541    /// let z = Arc::downgrade(&x);
2542    /// assert!(!Arc::is_unique(&x));
2543    /// ```
2544    ///
2545    /// # Pointer invalidation
2546    ///
2547    /// This function will always return the same value as `Arc::get_mut(arc).is_some()`. However,
2548    /// unlike that operation it does not produce any mutable references to the underlying data,
2549    /// meaning no pointers to the data inside the `Arc` are invalidated by the call. Thus, the
2550    /// following code is valid, even though it would be UB if it used `Arc::get_mut`:
2551    ///
2552    /// ```
2553    /// #![feature(arc_is_unique)]
2554    ///
2555    /// use std::sync::Arc;
2556    ///
2557    /// let arc = Arc::new(5);
2558    /// let pointer: *const i32 = &*arc;
2559    /// assert!(Arc::is_unique(&arc));
2560    /// assert_eq!(unsafe { *pointer }, 5);
2561    /// ```
2562    ///
2563    /// # Atomic orderings
2564    ///
    /// Concurrent drops of other `Arc` pointers to the same allocation will synchronize with this
2566    /// call - that is, this call performs an `Acquire` operation on the underlying strong and weak
2567    /// ref counts. This ensures that calling `get_mut_unchecked` is safe.
2568    ///
2569    /// Note that this operation requires locking the weak ref count, so concurrent calls to
2570    /// `downgrade` may spin-loop for a short period of time.
2571    ///
2572    /// [`get_mut_unchecked`]: Self::get_mut_unchecked
2573    #[inline]
2574    #[unstable(feature = "arc_is_unique", issue = "138938")]
2575    pub fn is_unique(this: &Self) -> bool {
2576        // lock the weak pointer count if we appear to be the sole weak pointer
2577        // holder.
2578        //
        // The acquire ordering here ensures a happens-before relationship with any
2580        // writes to `strong` (in particular in `Weak::upgrade`) prior to decrements
2581        // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded
2582        // weak ref was never dropped, the CAS here will fail so we do not care to synchronize.
2583        if this.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() {
2584            // This needs to be an `Acquire` to synchronize with the decrement of the `strong`
2585            // counter in `drop` -- the only access that happens when any but the last reference
2586            // is being dropped.
2587            let unique = this.inner().strong.load(Acquire) == 1;
2588
2589            // The release write here synchronizes with a read in `downgrade`,
2590            // effectively preventing the above read of `strong` from happening
2591            // after the write.
2592            this.inner().weak.store(1, Release); // release the lock
2593            unique
2594        } else {
2595            false
2596        }
2597    }
2598}
2599
2600#[stable(feature = "rust1", since = "1.0.0")]
2601unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Arc<T, A> {
2602    /// Drops the `Arc`.
2603    ///
2604    /// This will decrement the strong reference count. If the strong reference
2605    /// count reaches zero then the only other references (if any) are
2606    /// [`Weak`], so we `drop` the inner value.
2607    ///
2608    /// # Examples
2609    ///
2610    /// ```
2611    /// use std::sync::Arc;
2612    ///
2613    /// struct Foo;
2614    ///
2615    /// impl Drop for Foo {
2616    ///     fn drop(&mut self) {
2617    ///         println!("dropped!");
2618    ///     }
2619    /// }
2620    ///
2621    /// let foo  = Arc::new(Foo);
2622    /// let foo2 = Arc::clone(&foo);
2623    ///
2624    /// drop(foo);    // Doesn't print anything
2625    /// drop(foo2);   // Prints "dropped!"
2626    /// ```
2627    #[inline]
2628    fn drop(&mut self) {
2629        // Because `fetch_sub` is already atomic, we do not need to synchronize
2630        // with other threads unless we are going to delete the object. This
2631        // same logic applies to the below `fetch_sub` to the `weak` count.
2632        if self.inner().strong.fetch_sub(1, Release) != 1 {
2633            return;
2634        }
2635
2636        // This fence is needed to prevent reordering of use of the data and
2637        // deletion of the data. Because it is marked `Release`, the decreasing
2638        // of the reference count synchronizes with this `Acquire` fence. This
2639        // means that use of the data happens before decreasing the reference
2640        // count, which happens before this fence, which happens before the
2641        // deletion of the data.
2642        //
2643        // As explained in the [Boost documentation][1],
2644        //
2645        // > It is important to enforce any possible access to the object in one
2646        // > thread (through an existing reference) to *happen before* deleting
2647        // > the object in a different thread. This is achieved by a "release"
2648        // > operation after dropping a reference (any access to the object
2649        // > through this reference must obviously happened before), and an
2650        // > "acquire" operation before deleting the object.
2651        //
2652        // In particular, while the contents of an Arc are usually immutable, it's
2653        // possible to have interior writes to something like a Mutex<T>. Since a
2654        // Mutex is not acquired when it is deleted, we can't rely on its
2655        // synchronization logic to make writes in thread A visible to a destructor
2656        // running in thread B.
2657        //
2658        // Also note that the Acquire fence here could probably be replaced with an
2659        // Acquire load, which could improve performance in highly-contended
2660        // situations. See [2].
2661        //
2662        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
2663        // [2]: (https://github.com/rust-lang/rust/pull/41714)
2664        acquire!(self.inner().strong);
2665
2666        // Make sure we aren't trying to "drop" the shared static for empty slices
2667        // used by Default::default.
2668        debug_assert!(
2669            !ptr::addr_eq(self.ptr.as_ptr(), &STATIC_INNER_SLICE.inner),
2670            "Arcs backed by a static should never reach a strong count of 0. \
2671            Likely decrement_strong_count or from_raw were called too many times.",
2672        );
2673
2674        unsafe {
2675            self.drop_slow();
2676        }
2677    }
2678}
2679
2680impl<A: Allocator> Arc<dyn Any + Send + Sync, A> {
2681    /// Attempts to downcast the `Arc<dyn Any + Send + Sync>` to a concrete type.
2682    ///
2683    /// # Examples
2684    ///
2685    /// ```
2686    /// use std::any::Any;
2687    /// use std::sync::Arc;
2688    ///
2689    /// fn print_if_string(value: Arc<dyn Any + Send + Sync>) {
2690    ///     if let Ok(string) = value.downcast::<String>() {
2691    ///         println!("String ({}): {}", string.len(), string);
2692    ///     }
2693    /// }
2694    ///
2695    /// let my_string = "Hello World".to_string();
2696    /// print_if_string(Arc::new(my_string));
2697    /// print_if_string(Arc::new(0i8));
2698    /// ```
2699    #[inline]
2700    #[stable(feature = "rc_downcast", since = "1.29.0")]
2701    pub fn downcast<T>(self) -> Result<Arc<T, A>, Self>
2702    where
2703        T: Any + Send + Sync,
2704    {
2705        if (*self).is::<T>() {
2706            unsafe {
2707                let (ptr, alloc) = Arc::into_inner_with_allocator(self);
2708                Ok(Arc::from_inner_in(ptr.cast(), alloc))
2709            }
2710        } else {
2711            Err(self)
2712        }
2713    }
2714
2715    /// Downcasts the `Arc<dyn Any + Send + Sync>` to a concrete type.
2716    ///
2717    /// For a safe alternative see [`downcast`].
2718    ///
2719    /// # Examples
2720    ///
2721    /// ```
2722    /// #![feature(downcast_unchecked)]
2723    ///
2724    /// use std::any::Any;
2725    /// use std::sync::Arc;
2726    ///
2727    /// let x: Arc<dyn Any + Send + Sync> = Arc::new(1_usize);
2728    ///
2729    /// unsafe {
2730    ///     assert_eq!(*x.downcast_unchecked::<usize>(), 1);
2731    /// }
2732    /// ```
2733    ///
2734    /// # Safety
2735    ///
2736    /// The contained value must be of type `T`. Calling this method
2737    /// with the incorrect type is *undefined behavior*.
2738    ///
2740    /// [`downcast`]: Self::downcast
2741    #[inline]
2742    #[unstable(feature = "downcast_unchecked", issue = "90850")]
2743    pub unsafe fn downcast_unchecked<T>(self) -> Arc<T, A>
2744    where
2745        T: Any + Send + Sync,
2746    {
2747        unsafe {
2748            let (ptr, alloc) = Arc::into_inner_with_allocator(self);
2749            Arc::from_inner_in(ptr.cast(), alloc)
2750        }
2751    }
2752}
2753
2754impl<T> Weak<T> {
2755    /// Constructs a new `Weak<T>`, without allocating any memory.
2756    /// Calling [`upgrade`] on the return value always gives [`None`].
2757    ///
2758    /// [`upgrade`]: Weak::upgrade
2759    ///
2760    /// # Examples
2761    ///
2762    /// ```
2763    /// use std::sync::Weak;
2764    ///
2765    /// let empty: Weak<i64> = Weak::new();
2766    /// assert!(empty.upgrade().is_none());
2767    /// ```
2768    #[inline]
2769    #[stable(feature = "downgraded_weak", since = "1.10.0")]
2770    #[rustc_const_stable(feature = "const_weak_new", since = "1.73.0")]
2771    #[must_use]
2772    pub const fn new() -> Weak<T> {
2773        Weak { ptr: NonNull::without_provenance(NonZeroUsize::MAX), alloc: Global }
2774    }
2775}
2776
2777impl<T, A: Allocator> Weak<T, A> {
    /// Constructs a new `Weak<T, A>` without allocating any memory, though it is associated
    /// with the provided allocator.
2780    /// Calling [`upgrade`] on the return value always gives [`None`].
2781    ///
2782    /// [`upgrade`]: Weak::upgrade
2783    ///
2784    /// # Examples
2785    ///
2786    /// ```
2787    /// #![feature(allocator_api)]
2788    ///
2789    /// use std::sync::Weak;
2790    /// use std::alloc::System;
2791    ///
2792    /// let empty: Weak<i64, _> = Weak::new_in(System);
2793    /// assert!(empty.upgrade().is_none());
2794    /// ```
2795    #[inline]
2796    #[unstable(feature = "allocator_api", issue = "32838")]
2797    pub fn new_in(alloc: A) -> Weak<T, A> {
2798        Weak { ptr: NonNull::without_provenance(NonZeroUsize::MAX), alloc }
2799    }
2800}
2801
2802/// Helper type to allow accessing the reference counts without
2803/// making any assertions about the data field.
2804struct WeakInner<'a> {
2805    weak: &'a Atomic<usize>,
2806    strong: &'a Atomic<usize>,
2807}
2808
2809impl<T: ?Sized> Weak<T> {
2810    /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>`.
2811    ///
2812    /// This can be used to safely get a strong reference (by calling [`upgrade`]
    /// later) or to release the weak reference by dropping the `Weak<T>`.
2814    ///
2815    /// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
2816    /// as these don't own anything; the method still works on them).
2817    ///
2818    /// # Safety
2819    ///
    /// The pointer must have originated from [`into_raw`] and must still own its potential
    /// weak reference, and must point to a block of memory allocated by the global allocator.
2822    ///
2823    /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
2824    /// takes ownership of one weak reference currently represented as a raw pointer (the weak
2825    /// count is not modified by this operation) and therefore it must be paired with a previous
2826    /// call to [`into_raw`].
    ///
    /// # Examples
2828    ///
2829    /// ```
2830    /// use std::sync::{Arc, Weak};
2831    ///
2832    /// let strong = Arc::new("hello".to_owned());
2833    ///
2834    /// let raw_1 = Arc::downgrade(&strong).into_raw();
2835    /// let raw_2 = Arc::downgrade(&strong).into_raw();
2836    ///
2837    /// assert_eq!(2, Arc::weak_count(&strong));
2838    ///
2839    /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
2840    /// assert_eq!(1, Arc::weak_count(&strong));
2841    ///
2842    /// drop(strong);
2843    ///
2844    /// // Decrement the last weak count.
2845    /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
2846    /// ```
2847    ///
2848    /// [`new`]: Weak::new
2849    /// [`into_raw`]: Weak::into_raw
2850    /// [`upgrade`]: Weak::upgrade
2851    #[inline]
2852    #[stable(feature = "weak_into_raw", since = "1.45.0")]
2853    pub unsafe fn from_raw(ptr: *const T) -> Self {
2854        unsafe { Weak::from_raw_in(ptr, Global) }
2855    }
2856
2857    /// Consumes the `Weak<T>` and turns it into a raw pointer.
2858    ///
2859    /// This converts the weak pointer into a raw pointer, while still preserving the ownership of
2860    /// one weak reference (the weak count is not modified by this operation). It can be turned
2861    /// back into the `Weak<T>` with [`from_raw`].
2862    ///
2863    /// The same restrictions of accessing the target of the pointer as with
2864    /// [`as_ptr`] apply.
2865    ///
2866    /// # Examples
2867    ///
2868    /// ```
2869    /// use std::sync::{Arc, Weak};
2870    ///
2871    /// let strong = Arc::new("hello".to_owned());
2872    /// let weak = Arc::downgrade(&strong);
2873    /// let raw = weak.into_raw();
2874    ///
2875    /// assert_eq!(1, Arc::weak_count(&strong));
2876    /// assert_eq!("hello", unsafe { &*raw });
2877    ///
2878    /// drop(unsafe { Weak::from_raw(raw) });
2879    /// assert_eq!(0, Arc::weak_count(&strong));
2880    /// ```
2881    ///
2882    /// [`from_raw`]: Weak::from_raw
2883    /// [`as_ptr`]: Weak::as_ptr
2884    #[must_use = "losing the pointer will leak memory"]
2885    #[stable(feature = "weak_into_raw", since = "1.45.0")]
2886    pub fn into_raw(self) -> *const T {
2887        ManuallyDrop::new(self).as_ptr()
2888    }
2889}
2890
2891impl<T: ?Sized, A: Allocator> Weak<T, A> {
2892    /// Returns a reference to the underlying allocator.
2893    #[inline]
2894    #[unstable(feature = "allocator_api", issue = "32838")]
2895    pub fn allocator(&self) -> &A {
2896        &self.alloc
2897    }
2898
2899    /// Returns a raw pointer to the object `T` pointed to by this `Weak<T>`.
2900    ///
2901    /// The pointer is valid only if there are some strong references. The pointer may be dangling,
2902    /// unaligned or even [`null`] otherwise.
2903    ///
2904    /// # Examples
2905    ///
2906    /// ```
2907    /// use std::sync::Arc;
2908    /// use std::ptr;
2909    ///
2910    /// let strong = Arc::new("hello".to_owned());
2911    /// let weak = Arc::downgrade(&strong);
2912    /// // Both point to the same object
2913    /// assert!(ptr::eq(&*strong, weak.as_ptr()));
2914    /// // The strong here keeps it alive, so we can still access the object.
2915    /// assert_eq!("hello", unsafe { &*weak.as_ptr() });
2916    ///
2917    /// drop(strong);
2918    /// // But not any more. We can do weak.as_ptr(), but accessing the pointer would lead to
2919    /// // undefined behavior.
2920    /// // assert_eq!("hello", unsafe { &*weak.as_ptr() });
2921    /// ```
2922    ///
2923    /// [`null`]: core::ptr::null "ptr::null"
2924    #[must_use]
2925    #[stable(feature = "weak_into_raw", since = "1.45.0")]
2926    pub fn as_ptr(&self) -> *const T {
2927        let ptr: *mut ArcInner<T> = NonNull::as_ptr(self.ptr);
2928
2929        if is_dangling(ptr) {
2930            // If the pointer is dangling, we return the sentinel directly. This cannot be
2931            // a valid payload address, as the payload is at least as aligned as ArcInner (usize).
2932            ptr as *const T
2933        } else {
2934            // SAFETY: if is_dangling returns false, then the pointer is dereferenceable.
2935            // The payload may be dropped at this point, and we have to maintain provenance,
2936            // so use raw pointer manipulation.
2937            unsafe { &raw mut (*ptr).data }
2938        }
2939    }
2940
2941    /// Consumes the `Weak<T>`, returning the wrapped pointer and allocator.
2942    ///
2943    /// This converts the weak pointer into a raw pointer, while still preserving the ownership of
2944    /// one weak reference (the weak count is not modified by this operation). It can be turned
2945    /// back into the `Weak<T>` with [`from_raw_in`].
2946    ///
2947    /// The same restrictions of accessing the target of the pointer as with
2948    /// [`as_ptr`] apply.
2949    ///
2950    /// # Examples
2951    ///
2952    /// ```
2953    /// #![feature(allocator_api)]
2954    /// use std::sync::{Arc, Weak};
2955    /// use std::alloc::System;
2956    ///
2957    /// let strong = Arc::new_in("hello".to_owned(), System);
2958    /// let weak = Arc::downgrade(&strong);
2959    /// let (raw, alloc) = weak.into_raw_with_allocator();
2960    ///
2961    /// assert_eq!(1, Arc::weak_count(&strong));
2962    /// assert_eq!("hello", unsafe { &*raw });
2963    ///
2964    /// drop(unsafe { Weak::from_raw_in(raw, alloc) });
2965    /// assert_eq!(0, Arc::weak_count(&strong));
2966    /// ```
2967    ///
2968    /// [`from_raw_in`]: Weak::from_raw_in
2969    /// [`as_ptr`]: Weak::as_ptr
2970    #[must_use = "losing the pointer will leak memory"]
2971    #[unstable(feature = "allocator_api", issue = "32838")]
2972    pub fn into_raw_with_allocator(self) -> (*const T, A) {
2973        let this = mem::ManuallyDrop::new(self);
2974        let result = this.as_ptr();
        // SAFETY: `this` is `ManuallyDrop`, so the allocator will not be double-dropped.
2976        let alloc = unsafe { ptr::read(&this.alloc) };
2977        (result, alloc)
2978    }
2979
2980    /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>` in the provided
2981    /// allocator.
2982    ///
    /// This can be used to safely get a strong reference (by calling [`upgrade`]
    /// later) or to release the owned weak reference by dropping the `Weak<T>`.
2985    ///
2986    /// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
2987    /// as these don't own anything; the method still works on them).
2988    ///
2989    /// # Safety
2990    ///
    /// The pointer must have originated from [`into_raw`] and must still own its potential
    /// weak reference, and must point to a block of memory allocated by `alloc`.
2993    ///
2994    /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
2995    /// takes ownership of one weak reference currently represented as a raw pointer (the weak
2996    /// count is not modified by this operation) and therefore it must be paired with a previous
2997    /// call to [`into_raw`].
2998    /// # Examples
2999    ///
3000    /// ```
3001    /// use std::sync::{Arc, Weak};
3002    ///
3003    /// let strong = Arc::new("hello".to_owned());
3004    ///
3005    /// let raw_1 = Arc::downgrade(&strong).into_raw();
3006    /// let raw_2 = Arc::downgrade(&strong).into_raw();
3007    ///
3008    /// assert_eq!(2, Arc::weak_count(&strong));
3009    ///
3010    /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
3011    /// assert_eq!(1, Arc::weak_count(&strong));
3012    ///
3013    /// drop(strong);
3014    ///
3015    /// // Decrement the last weak count.
3016    /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
3017    /// ```
3018    ///
3019    /// [`new`]: Weak::new
3020    /// [`into_raw`]: Weak::into_raw
3021    /// [`upgrade`]: Weak::upgrade
3022    #[inline]
3023    #[unstable(feature = "allocator_api", issue = "32838")]
3024    pub unsafe fn from_raw_in(ptr: *const T, alloc: A) -> Self {
3025        // See Weak::as_ptr for context on how the input pointer is derived.
3026
3027        let ptr = if is_dangling(ptr) {
3028            // This is a dangling Weak.
3029            ptr as *mut ArcInner<T>
3030        } else {
3031            // Otherwise, we're guaranteed the pointer came from a nondangling Weak.
3032            // SAFETY: data_offset is safe to call, as ptr references a real (potentially dropped) T.
3033            let offset = unsafe { data_offset(ptr) };
            // Thus, we reverse the offset to get the whole ArcInner.
3035            // SAFETY: the pointer originated from a Weak, so this offset is safe.
3036            unsafe { ptr.byte_sub(offset) as *mut ArcInner<T> }
3037        };
3038
        // SAFETY: we have now recovered the original Weak pointer, so we can create the Weak.
3040        Weak { ptr: unsafe { NonNull::new_unchecked(ptr) }, alloc }
3041    }
3042}
3043
3044impl<T: ?Sized, A: Allocator> Weak<T, A> {
3045    /// Attempts to upgrade the `Weak` pointer to an [`Arc`], delaying
3046    /// dropping of the inner value if successful.
3047    ///
3048    /// Returns [`None`] if the inner value has since been dropped.
3049    ///
3050    /// # Examples
3051    ///
3052    /// ```
3053    /// use std::sync::Arc;
3054    ///
3055    /// let five = Arc::new(5);
3056    ///
3057    /// let weak_five = Arc::downgrade(&five);
3058    ///
3059    /// let strong_five: Option<Arc<_>> = weak_five.upgrade();
3060    /// assert!(strong_five.is_some());
3061    ///
3062    /// // Destroy all strong pointers.
3063    /// drop(strong_five);
3064    /// drop(five);
3065    ///
3066    /// assert!(weak_five.upgrade().is_none());
3067    /// ```
3068    #[must_use = "this returns a new `Arc`, \
3069                  without modifying the original weak pointer"]
3070    #[stable(feature = "arc_weak", since = "1.4.0")]
3071    pub fn upgrade(&self) -> Option<Arc<T, A>>
3072    where
3073        A: Clone,
3074    {
3075        #[inline]
3076        fn checked_increment(n: usize) -> Option<usize> {
            // Any write of 0 we can observe leaves the field in a permanently zero state.
3078            if n == 0 {
3079                return None;
3080            }
3081            // See comments in `Arc::clone` for why we do this (for `mem::forget`).
3082            assert!(n <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
3083            Some(n + 1)
3084        }
3085
3086        // We use a CAS loop to increment the strong count instead of a
3087        // fetch_add as this function should never take the reference count
3088        // from zero to one.
3089        //
3090        // Relaxed is fine for the failure case because we don't have any expectations about the new state.
3091        // Acquire is necessary for the success case to synchronise with `Arc::new_cyclic`, when the inner
3092        // value can be initialized after `Weak` references have already been created. In that case, we
3093        // expect to observe the fully initialized value.
3094        if self.inner()?.strong.fetch_update(Acquire, Relaxed, checked_increment).is_ok() {
            // SAFETY: the strong count was nonzero (verified in checked_increment),
            // so the inner value is still alive and we may create a new strong reference.
3096            unsafe { Some(Arc::from_inner_in(self.ptr, self.alloc.clone())) }
3097        } else {
3098            None
3099        }
3100    }
3101
3102    /// Gets the number of strong (`Arc`) pointers pointing to this allocation.
3103    ///
3104    /// If `self` was created using [`Weak::new`], this will return 0.
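    ///
    /// # Examples
    ///
    /// A small sketch of how the count tracks strong handles (single-threaded, so the
    /// values shown are exact):
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let weak_five = Arc::downgrade(&five);
    /// assert_eq!(1, weak_five.strong_count());
    ///
    /// let also_five = Arc::clone(&five);
    /// assert_eq!(2, weak_five.strong_count());
    ///
    /// drop(five);
    /// drop(also_five);
    /// assert_eq!(0, weak_five.strong_count());
    /// ```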
3105    #[must_use]
3106    #[stable(feature = "weak_counts", since = "1.41.0")]
3107    pub fn strong_count(&self) -> usize {
3108        if let Some(inner) = self.inner() { inner.strong.load(Relaxed) } else { 0 }
3109    }
3110
3111    /// Gets an approximation of the number of `Weak` pointers pointing to this
3112    /// allocation.
3113    ///
3114    /// If `self` was created using [`Weak::new`], or if there are no remaining
3115    /// strong pointers, this will return 0.
3116    ///
3117    /// # Accuracy
3118    ///
3119    /// Due to implementation details, the returned value can be off by 1 in
3120    /// either direction when other threads are manipulating any `Arc`s or
3121    /// `Weak`s pointing to the same allocation.
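    ///
    /// # Examples
    ///
    /// A small sketch (single-threaded, so the values shown are exact):
    ///
    /// ```
    /// use std::sync::{Arc, Weak};
    ///
    /// let five = Arc::new(5);
    /// let weak_five = Arc::downgrade(&five);
    /// assert_eq!(1, weak_five.weak_count());
    ///
    /// let also_weak = Weak::clone(&weak_five);
    /// assert_eq!(2, weak_five.weak_count());
    ///
    /// // Once the last strong pointer is gone, the reported weak count is 0.
    /// drop(five);
    /// assert_eq!(0, weak_five.weak_count());
    /// drop(also_weak);
    /// ```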
3122    #[must_use]
3123    #[stable(feature = "weak_counts", since = "1.41.0")]
3124    pub fn weak_count(&self) -> usize {
3125        if let Some(inner) = self.inner() {
3126            let weak = inner.weak.load(Acquire);
3127            let strong = inner.strong.load(Relaxed);
3128            if strong == 0 {
3129                0
3130            } else {
3131                // Since we observed that there was at least one strong pointer
3132                // after reading the weak count, we know that the implicit weak
3133                // reference (present whenever any strong references are alive)
3134                // was still around when we observed the weak count, and can
3135                // therefore safely subtract it.
3136                weak - 1
3137            }
3138        } else {
3139            0
3140        }
3141    }
3142
    /// Returns `None` when the pointer is dangling and there is no allocated `ArcInner`
    /// (i.e., when this `Weak` was created by `Weak::new`).
3145    #[inline]
3146    fn inner(&self) -> Option<WeakInner<'_>> {
3147        let ptr = self.ptr.as_ptr();
3148        if is_dangling(ptr) {
3149            None
3150        } else {
3151            // We are careful to *not* create a reference covering the "data" field, as
3152            // the field may be mutated concurrently (for example, if the last `Arc`
3153            // is dropped, the data field will be dropped in-place).
3154            Some(unsafe { WeakInner { strong: &(*ptr).strong, weak: &(*ptr).weak } })
3155        }
3156    }
3157
    /// Returns `true` if the two `Weak`s point to the same allocation (similar to [`ptr::eq`]), or if
    /// both don't point to any allocation (because they were created with `Weak::new()`). However,
    /// this function ignores the metadata of `dyn Trait` pointers.
3161    ///
3162    /// # Notes
3163    ///
    /// Since this compares pointers, any two `Weak`s created by `Weak::new()` will compare
    /// equal to each other, even though they don't point to any allocation.
3166    ///
3167    /// # Examples
3168    ///
3169    /// ```
3170    /// use std::sync::Arc;
3171    ///
3172    /// let first_rc = Arc::new(5);
3173    /// let first = Arc::downgrade(&first_rc);
3174    /// let second = Arc::downgrade(&first_rc);
3175    ///
3176    /// assert!(first.ptr_eq(&second));
3177    ///
3178    /// let third_rc = Arc::new(5);
3179    /// let third = Arc::downgrade(&third_rc);
3180    ///
3181    /// assert!(!first.ptr_eq(&third));
3182    /// ```
3183    ///
3184    /// Comparing `Weak::new`.
3185    ///
3186    /// ```
3187    /// use std::sync::{Arc, Weak};
3188    ///
3189    /// let first = Weak::new();
3190    /// let second = Weak::new();
3191    /// assert!(first.ptr_eq(&second));
3192    ///
3193    /// let third_rc = Arc::new(());
3194    /// let third = Arc::downgrade(&third_rc);
3195    /// assert!(!first.ptr_eq(&third));
3196    /// ```
3197    ///
3198    /// [`ptr::eq`]: core::ptr::eq "ptr::eq"
3199    #[inline]
3200    #[must_use]
3201    #[stable(feature = "weak_ptr_eq", since = "1.39.0")]
3202    pub fn ptr_eq(&self, other: &Self) -> bool {
3203        ptr::addr_eq(self.ptr.as_ptr(), other.ptr.as_ptr())
3204    }
3205}
3206
3207#[stable(feature = "arc_weak", since = "1.4.0")]
3208impl<T: ?Sized, A: Allocator + Clone> Clone for Weak<T, A> {
3209    /// Makes a clone of the `Weak` pointer that points to the same allocation.
3210    ///
3211    /// # Examples
3212    ///
3213    /// ```
3214    /// use std::sync::{Arc, Weak};
3215    ///
3216    /// let weak_five = Arc::downgrade(&Arc::new(5));
3217    ///
3218    /// let _ = Weak::clone(&weak_five);
3219    /// ```
3220    #[inline]
3221    fn clone(&self) -> Weak<T, A> {
3222        if let Some(inner) = self.inner() {
            // See comments in Arc::clone() for why this is relaxed. This can use a
            // fetch_add (ignoring the lock) because the weak count is only locked
            // when there are *no other* weak pointers in existence. (So we can't be
            // running this code in that case.)
3227            let old_size = inner.weak.fetch_add(1, Relaxed);
3228
3229            // See comments in Arc::clone() for why we do this (for mem::forget).
3230            if old_size > MAX_REFCOUNT {
3231                abort();
3232            }
3233        }
3234
3235        Weak { ptr: self.ptr, alloc: self.alloc.clone() }
3236    }
3237}
3238
3239#[unstable(feature = "ergonomic_clones", issue = "132290")]
3240impl<T: ?Sized, A: Allocator + Clone> UseCloned for Weak<T, A> {}
3241
3242#[stable(feature = "downgraded_weak", since = "1.10.0")]
3243impl<T> Default for Weak<T> {
3244    /// Constructs a new `Weak<T>`, without allocating memory.
3245    /// Calling [`upgrade`] on the return value always
3246    /// gives [`None`].
3247    ///
3248    /// [`upgrade`]: Weak::upgrade
3249    ///
3250    /// # Examples
3251    ///
3252    /// ```
3253    /// use std::sync::Weak;
3254    ///
3255    /// let empty: Weak<i64> = Default::default();
3256    /// assert!(empty.upgrade().is_none());
3257    /// ```
3258    fn default() -> Weak<T> {
3259        Weak::new()
3260    }
3261}
3262
3263#[stable(feature = "arc_weak", since = "1.4.0")]
3264unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Weak<T, A> {
3265    /// Drops the `Weak` pointer.
3266    ///
3267    /// # Examples
3268    ///
3269    /// ```
3270    /// use std::sync::{Arc, Weak};
3271    ///
3272    /// struct Foo;
3273    ///
3274    /// impl Drop for Foo {
3275    ///     fn drop(&mut self) {
3276    ///         println!("dropped!");
3277    ///     }
3278    /// }
3279    ///
3280    /// let foo = Arc::new(Foo);
3281    /// let weak_foo = Arc::downgrade(&foo);
3282    /// let other_weak_foo = Weak::clone(&weak_foo);
3283    ///
3284    /// drop(weak_foo);   // Doesn't print anything
3285    /// drop(foo);        // Prints "dropped!"
3286    ///
3287    /// assert!(other_weak_foo.upgrade().is_none());
3288    /// ```
3289    fn drop(&mut self) {
        // If we find out that we were the last weak pointer, then it's time to
        // deallocate the data entirely. See the discussion in Arc::drop() about
        // the memory orderings.
3293        //
3294        // It's not necessary to check for the locked state here, because the
3295        // weak count can only be locked if there was precisely one weak ref,
3296        // meaning that drop could only subsequently run ON that remaining weak
3297        // ref, which can only happen after the lock is released.
3298        let inner = if let Some(inner) = self.inner() { inner } else { return };
3299
3300        if inner.weak.fetch_sub(1, Release) == 1 {
3301            acquire!(inner.weak);
3302
3303            // Make sure we aren't trying to "deallocate" the shared static for empty slices
3304            // used by Default::default.
3305            debug_assert!(
3306                !ptr::addr_eq(self.ptr.as_ptr(), &STATIC_INNER_SLICE.inner),
3307                "Arc/Weaks backed by a static should never be deallocated. \
3308                Likely decrement_strong_count or from_raw were called too many times.",
3309            );
3310
3311            unsafe {
3312                self.alloc.deallocate(self.ptr.cast(), Layout::for_value_raw(self.ptr.as_ptr()))
3313            }
3314        }
3315    }
3316}
3317
3318#[stable(feature = "rust1", since = "1.0.0")]
3319trait ArcEqIdent<T: ?Sized + PartialEq, A: Allocator> {
3320    fn eq(&self, other: &Arc<T, A>) -> bool;
3321    fn ne(&self, other: &Arc<T, A>) -> bool;
3322}
3323
3324#[stable(feature = "rust1", since = "1.0.0")]
3325impl<T: ?Sized + PartialEq, A: Allocator> ArcEqIdent<T, A> for Arc<T, A> {
3326    #[inline]
3327    default fn eq(&self, other: &Arc<T, A>) -> bool {
3328        **self == **other
3329    }
3330    #[inline]
3331    default fn ne(&self, other: &Arc<T, A>) -> bool {
3332        **self != **other
3333    }
3334}
3335
/// We're doing this specialization here, and not as a more general optimization on `&T`, because it
/// would otherwise add a cost to all equality checks on refs. We assume that `Arc`s are used to
/// store large values that are slow to clone but also expensive to check for equality, causing the
/// pointer comparison to pay off more easily. It's also more likely to have two `Arc` clones that
/// point to the same value than two `&T`s.
3341///
3342/// We can only do this when `T: Eq` as a `PartialEq` might be deliberately irreflexive.
3343#[stable(feature = "rust1", since = "1.0.0")]
3344impl<T: ?Sized + crate::rc::MarkerEq, A: Allocator> ArcEqIdent<T, A> for Arc<T, A> {
3345    #[inline]
3346    fn eq(&self, other: &Arc<T, A>) -> bool {
3347        Arc::ptr_eq(self, other) || **self == **other
3348    }
3349
3350    #[inline]
3351    fn ne(&self, other: &Arc<T, A>) -> bool {
3352        !Arc::ptr_eq(self, other) && **self != **other
3353    }
3354}
3355
3356#[stable(feature = "rust1", since = "1.0.0")]
3357impl<T: ?Sized + PartialEq, A: Allocator> PartialEq for Arc<T, A> {
3358    /// Equality for two `Arc`s.
3359    ///
    /// Two `Arc`s are equal if their inner values are equal, even if they are
    /// stored in different allocations.
3362    ///
3363    /// If `T` also implements `Eq` (implying reflexivity of equality),
3364    /// two `Arc`s that point to the same allocation are always equal.
3365    ///
3366    /// # Examples
3367    ///
3368    /// ```
3369    /// use std::sync::Arc;
3370    ///
3371    /// let five = Arc::new(5);
3372    ///
3373    /// assert!(five == Arc::new(5));
3374    /// ```
3375    #[inline]
3376    fn eq(&self, other: &Arc<T, A>) -> bool {
3377        ArcEqIdent::eq(self, other)
3378    }
3379
3380    /// Inequality for two `Arc`s.
3381    ///
3382    /// Two `Arc`s are not equal if their inner values are not equal.
3383    ///
    /// If `T` also implements `Eq` (implying reflexivity of equality),
    /// two `Arc`s that point to the same allocation are never unequal.
3386    ///
3387    /// # Examples
3388    ///
3389    /// ```
3390    /// use std::sync::Arc;
3391    ///
3392    /// let five = Arc::new(5);
3393    ///
3394    /// assert!(five != Arc::new(6));
3395    /// ```
3396    #[inline]
3397    fn ne(&self, other: &Arc<T, A>) -> bool {
3398        ArcEqIdent::ne(self, other)
3399    }
3400}
3401
3402#[stable(feature = "rust1", since = "1.0.0")]
3403impl<T: ?Sized + PartialOrd, A: Allocator> PartialOrd for Arc<T, A> {
3404    /// Partial comparison for two `Arc`s.
3405    ///
3406    /// The two are compared by calling `partial_cmp()` on their inner values.
3407    ///
3408    /// # Examples
3409    ///
3410    /// ```
3411    /// use std::sync::Arc;
3412    /// use std::cmp::Ordering;
3413    ///
3414    /// let five = Arc::new(5);
3415    ///
3416    /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Arc::new(6)));
3417    /// ```
3418    fn partial_cmp(&self, other: &Arc<T, A>) -> Option<Ordering> {
3419        (**self).partial_cmp(&**other)
3420    }
3421
3422    /// Less-than comparison for two `Arc`s.
3423    ///
3424    /// The two are compared by calling `<` on their inner values.
3425    ///
3426    /// # Examples
3427    ///
3428    /// ```
3429    /// use std::sync::Arc;
3430    ///
3431    /// let five = Arc::new(5);
3432    ///
3433    /// assert!(five < Arc::new(6));
3434    /// ```
3435    fn lt(&self, other: &Arc<T, A>) -> bool {
3436        *(*self) < *(*other)
3437    }
3438
3439    /// 'Less than or equal to' comparison for two `Arc`s.
3440    ///
3441    /// The two are compared by calling `<=` on their inner values.
3442    ///
3443    /// # Examples
3444    ///
3445    /// ```
3446    /// use std::sync::Arc;
3447    ///
3448    /// let five = Arc::new(5);
3449    ///
3450    /// assert!(five <= Arc::new(5));
3451    /// ```
3452    fn le(&self, other: &Arc<T, A>) -> bool {
3453        *(*self) <= *(*other)
3454    }
3455
3456    /// Greater-than comparison for two `Arc`s.
3457    ///
3458    /// The two are compared by calling `>` on their inner values.
3459    ///
3460    /// # Examples
3461    ///
3462    /// ```
3463    /// use std::sync::Arc;
3464    ///
3465    /// let five = Arc::new(5);
3466    ///
3467    /// assert!(five > Arc::new(4));
3468    /// ```
3469    fn gt(&self, other: &Arc<T, A>) -> bool {
3470        *(*self) > *(*other)
3471    }
3472
3473    /// 'Greater than or equal to' comparison for two `Arc`s.
3474    ///
3475    /// The two are compared by calling `>=` on their inner values.
3476    ///
3477    /// # Examples
3478    ///
3479    /// ```
3480    /// use std::sync::Arc;
3481    ///
3482    /// let five = Arc::new(5);
3483    ///
3484    /// assert!(five >= Arc::new(5));
3485    /// ```
3486    fn ge(&self, other: &Arc<T, A>) -> bool {
3487        *(*self) >= *(*other)
3488    }
3489}
3490#[stable(feature = "rust1", since = "1.0.0")]
3491impl<T: ?Sized + Ord, A: Allocator> Ord for Arc<T, A> {
3492    /// Comparison for two `Arc`s.
3493    ///
3494    /// The two are compared by calling `cmp()` on their inner values.
3495    ///
3496    /// # Examples
3497    ///
3498    /// ```
3499    /// use std::sync::Arc;
3500    /// use std::cmp::Ordering;
3501    ///
3502    /// let five = Arc::new(5);
3503    ///
3504    /// assert_eq!(Ordering::Less, five.cmp(&Arc::new(6)));
3505    /// ```
3506    fn cmp(&self, other: &Arc<T, A>) -> Ordering {
3507        (**self).cmp(&**other)
3508    }
3509}
3510#[stable(feature = "rust1", since = "1.0.0")]
3511impl<T: ?Sized + Eq, A: Allocator> Eq for Arc<T, A> {}
3512
3513#[stable(feature = "rust1", since = "1.0.0")]
3514impl<T: ?Sized + fmt::Display, A: Allocator> fmt::Display for Arc<T, A> {
3515    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3516        fmt::Display::fmt(&**self, f)
3517    }
3518}
3519
3520#[stable(feature = "rust1", since = "1.0.0")]
3521impl<T: ?Sized + fmt::Debug, A: Allocator> fmt::Debug for Arc<T, A> {
3522    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3523        fmt::Debug::fmt(&**self, f)
3524    }
3525}
3526
3527#[stable(feature = "rust1", since = "1.0.0")]
3528impl<T: ?Sized, A: Allocator> fmt::Pointer for Arc<T, A> {
3529    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3530        fmt::Pointer::fmt(&(&raw const **self), f)
3531    }
3532}
3533
3534#[cfg(not(no_global_oom_handling))]
3535#[stable(feature = "rust1", since = "1.0.0")]
3536impl<T: Default> Default for Arc<T> {
3537    /// Creates a new `Arc<T>`, with the `Default` value for `T`.
3538    ///
3539    /// # Examples
3540    ///
3541    /// ```
3542    /// use std::sync::Arc;
3543    ///
3544    /// let x: Arc<i32> = Default::default();
3545    /// assert_eq!(*x, 0);
3546    /// ```
3547    fn default() -> Arc<T> {
3548        unsafe {
3549            Self::from_inner(
3550                Box::leak(Box::write(
3551                    Box::new_uninit(),
3552                    ArcInner {
3553                        strong: atomic::AtomicUsize::new(1),
3554                        weak: atomic::AtomicUsize::new(1),
3555                        data: T::default(),
3556                    },
3557                ))
3558                .into(),
3559            )
3560        }
3561    }
3562}
3563
3564/// Struct to hold the static `ArcInner` used for empty `Arc<str/CStr/[T]>` as
3565/// returned by `Default::default`.
3566///
3567/// Layout notes:
3568/// * `repr(align(16))` so we can use it for `[T]` with `align_of::<T>() <= 16`.
3569/// * `repr(C)` so `inner` is at offset 0 (and thus guaranteed to actually be aligned to 16).
3570/// * `[u8; 1]` (to be initialized with 0) so it can be used for `Arc<CStr>`.
3571#[repr(C, align(16))]
3572struct SliceArcInnerForStatic {
3573    inner: ArcInner<[u8; 1]>,
3574}
3575#[cfg(not(no_global_oom_handling))]
3576const MAX_STATIC_INNER_SLICE_ALIGNMENT: usize = 16;
3577
3578static STATIC_INNER_SLICE: SliceArcInnerForStatic = SliceArcInnerForStatic {
3579    inner: ArcInner {
3580        strong: atomic::AtomicUsize::new(1),
3581        weak: atomic::AtomicUsize::new(1),
3582        data: [0],
3583    },
3584};
3585
3586#[cfg(not(no_global_oom_handling))]
3587#[stable(feature = "more_rc_default_impls", since = "1.80.0")]
3588impl Default for Arc<str> {
    /// Creates an empty `str` inside an `Arc`.
    ///
    /// This may or may not share an allocation with other `Arc`s.
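    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let s: Arc<str> = Default::default();
    /// assert_eq!("", &s[..]);
    /// ```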
3592    #[inline]
3593    fn default() -> Self {
3594        let arc: Arc<[u8]> = Default::default();
3595        debug_assert!(core::str::from_utf8(&*arc).is_ok());
3596        let (ptr, alloc) = Arc::into_inner_with_allocator(arc);
3597        unsafe { Arc::from_ptr_in(ptr.as_ptr() as *mut ArcInner<str>, alloc) }
3598    }
3599}
3600
3601#[cfg(not(no_global_oom_handling))]
3602#[stable(feature = "more_rc_default_impls", since = "1.80.0")]
3603impl Default for Arc<core::ffi::CStr> {
    /// Creates an empty `CStr` inside an `Arc`.
    ///
    /// This may or may not share an allocation with other `Arc`s.
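    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use std::ffi::CStr;
    /// use std::sync::Arc;
    ///
    /// let c: Arc<CStr> = Default::default();
    /// assert!(c.to_bytes().is_empty());
    /// ```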
3607    #[inline]
3608    fn default() -> Self {
3609        use core::ffi::CStr;
3610        let inner: NonNull<ArcInner<[u8]>> = NonNull::from(&STATIC_INNER_SLICE.inner);
3611        let inner: NonNull<ArcInner<CStr>> =
3612            NonNull::new(inner.as_ptr() as *mut ArcInner<CStr>).unwrap();
3613        // `this` semantically is the Arc "owned" by the static, so make sure not to drop it.
3614        let this: mem::ManuallyDrop<Arc<CStr>> =
3615            unsafe { mem::ManuallyDrop::new(Arc::from_inner(inner)) };
3616        (*this).clone()
3617    }
3618}
3619
3620#[cfg(not(no_global_oom_handling))]
3621#[stable(feature = "more_rc_default_impls", since = "1.80.0")]
3622impl<T> Default for Arc<[T]> {
    /// Creates an empty `[T]` inside an `Arc`.
    ///
    /// This may or may not share an allocation with other `Arc`s.
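    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let v: Arc<[i32]> = Default::default();
    /// assert!(v.is_empty());
    /// ```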
3626    #[inline]
3627    fn default() -> Self {
3628        if align_of::<T>() <= MAX_STATIC_INNER_SLICE_ALIGNMENT {
3629            // We take a reference to the whole struct instead of the ArcInner<[u8; 1]> inside it so
3630            // we don't shrink the range of bytes the ptr is allowed to access under Stacked Borrows.
3631            // (Miri complains on 32-bit targets with Arc<[Align16]> otherwise.)
3632            // (Note that NonNull::from(&STATIC_INNER_SLICE.inner) is fine under Tree Borrows.)
3633            let inner: NonNull<SliceArcInnerForStatic> = NonNull::from(&STATIC_INNER_SLICE);
3634            let inner: NonNull<ArcInner<[T; 0]>> = inner.cast();
3635            // `this` semantically is the Arc "owned" by the static, so make sure not to drop it.
3636            let this: mem::ManuallyDrop<Arc<[T; 0]>> =
3637                unsafe { mem::ManuallyDrop::new(Arc::from_inner(inner)) };
3638            return (*this).clone();
3639        }
3640
3641        // If T's alignment is too large for the static, make a new unique allocation.
3642        let arr: [T; 0] = [];
3643        Arc::from(arr)
3644    }
3645}
3646
3647#[cfg(not(no_global_oom_handling))]
3648#[stable(feature = "pin_default_impls", since = "CURRENT_RUSTC_VERSION")]
3649impl<T> Default for Pin<Arc<T>>
3650where
3651    T: ?Sized,
3652    Arc<T>: Default,
3653{
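    /// Creates a pinned `Arc<T>` holding the `Default` value for `T`. A minimal sketch:
    ///
    /// ```
    /// use std::pin::Pin;
    /// use std::sync::Arc;
    ///
    /// let x: Pin<Arc<i32>> = Default::default();
    /// assert_eq!(*x, 0);
    /// ```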
3654    #[inline]
3655    fn default() -> Self {
3656        unsafe { Pin::new_unchecked(Arc::<T>::default()) }
3657    }
3658}
3659
3660#[stable(feature = "rust1", since = "1.0.0")]
3661impl<T: ?Sized + Hash, A: Allocator> Hash for Arc<T, A> {
3662    fn hash<H: Hasher>(&self, state: &mut H) {
3663        (**self).hash(state)
3664    }
3665}
3666
3667#[cfg(not(no_global_oom_handling))]
3668#[stable(feature = "from_for_ptrs", since = "1.6.0")]
3669impl<T> From<T> for Arc<T> {
    /// Converts a `T` into an `Arc<T>`.
3671    ///
3672    /// The conversion moves the value into a
3673    /// newly allocated `Arc`. It is equivalent to
3674    /// calling `Arc::new(t)`.
3675    ///
3676    /// # Example
3677    /// ```rust
3678    /// # use std::sync::Arc;
3679    /// let x = 5;
3680    /// let arc = Arc::new(5);
3681    ///
3682    /// assert_eq!(Arc::from(x), arc);
3683    /// ```
3684    fn from(t: T) -> Self {
3685        Arc::new(t)
3686    }
3687}
3688
3689#[cfg(not(no_global_oom_handling))]
3690#[stable(feature = "shared_from_array", since = "1.74.0")]
3691impl<T, const N: usize> From<[T; N]> for Arc<[T]> {
3692    /// Converts a [`[T; N]`](prim@array) into an `Arc<[T]>`.
3693    ///
3694    /// The conversion moves the array into a newly allocated `Arc`.
3695    ///
3696    /// # Example
3697    ///
3698    /// ```
3699    /// # use std::sync::Arc;
3700    /// let original: [i32; 3] = [1, 2, 3];
3701    /// let shared: Arc<[i32]> = Arc::from(original);
3702    /// assert_eq!(&[1, 2, 3], &shared[..]);
3703    /// ```
3704    #[inline]
3705    fn from(v: [T; N]) -> Arc<[T]> {
3706        Arc::<[T; N]>::from(v)
3707    }
3708}
3709
3710#[cfg(not(no_global_oom_handling))]
3711#[stable(feature = "shared_from_slice", since = "1.21.0")]
3712impl<T: Clone> From<&[T]> for Arc<[T]> {
3713    /// Allocates a reference-counted slice and fills it by cloning `v`'s items.
3714    ///
3715    /// # Example
3716    ///
3717    /// ```
3718    /// # use std::sync::Arc;
3719    /// let original: &[i32] = &[1, 2, 3];
3720    /// let shared: Arc<[i32]> = Arc::from(original);
3721    /// assert_eq!(&[1, 2, 3], &shared[..]);
3722    /// ```
3723    #[inline]
3724    fn from(v: &[T]) -> Arc<[T]> {
3725        <Self as ArcFromSlice<T>>::from_slice(v)
3726    }
3727}
3728
3729#[cfg(not(no_global_oom_handling))]
3730#[stable(feature = "shared_from_mut_slice", since = "1.84.0")]
3731impl<T: Clone> From<&mut [T]> for Arc<[T]> {
3732    /// Allocates a reference-counted slice and fills it by cloning `v`'s items.
3733    ///
3734    /// # Example
3735    ///
3736    /// ```
3737    /// # use std::sync::Arc;
3738    /// let mut original = [1, 2, 3];
3739    /// let original: &mut [i32] = &mut original;
3740    /// let shared: Arc<[i32]> = Arc::from(original);
3741    /// assert_eq!(&[1, 2, 3], &shared[..]);
3742    /// ```
3743    #[inline]
3744    fn from(v: &mut [T]) -> Arc<[T]> {
3745        Arc::from(&*v)
3746    }
3747}
3748
3749#[cfg(not(no_global_oom_handling))]
3750#[stable(feature = "shared_from_slice", since = "1.21.0")]
3751impl From<&str> for Arc<str> {
3752    /// Allocates a reference-counted `str` and copies `v` into it.
3753    ///
3754    /// # Example
3755    ///
3756    /// ```
3757    /// # use std::sync::Arc;
3758    /// let shared: Arc<str> = Arc::from("eggplant");
3759    /// assert_eq!("eggplant", &shared[..]);
3760    /// ```
3761    #[inline]
3762    fn from(v: &str) -> Arc<str> {
3763        let arc = Arc::<[u8]>::from(v.as_bytes());
3764        unsafe { Arc::from_raw(Arc::into_raw(arc) as *const str) }
3765    }
3766}
3767
3768#[cfg(not(no_global_oom_handling))]
3769#[stable(feature = "shared_from_mut_slice", since = "1.84.0")]
3770impl From<&mut str> for Arc<str> {
3771    /// Allocates a reference-counted `str` and copies `v` into it.
3772    ///
3773    /// # Example
3774    ///
3775    /// ```
3776    /// # use std::sync::Arc;
3777    /// let mut original = String::from("eggplant");
3778    /// let original: &mut str = &mut original;
3779    /// let shared: Arc<str> = Arc::from(original);
3780    /// assert_eq!("eggplant", &shared[..]);
3781    /// ```
3782    #[inline]
3783    fn from(v: &mut str) -> Arc<str> {
3784        Arc::from(&*v)
3785    }
3786}
3787
3788#[cfg(not(no_global_oom_handling))]
3789#[stable(feature = "shared_from_slice", since = "1.21.0")]
3790impl From<String> for Arc<str> {
3791    /// Allocates a reference-counted `str` and copies `v` into it.
3792    ///
3793    /// # Example
3794    ///
3795    /// ```
3796    /// # use std::sync::Arc;
3797    /// let unique: String = "eggplant".to_owned();
3798    /// let shared: Arc<str> = Arc::from(unique);
3799    /// assert_eq!("eggplant", &shared[..]);
3800    /// ```
3801    #[inline]
3802    fn from(v: String) -> Arc<str> {
3803        Arc::from(&v[..])
3804    }
3805}
3806
3807#[cfg(not(no_global_oom_handling))]
3808#[stable(feature = "shared_from_slice", since = "1.21.0")]
3809impl<T: ?Sized, A: Allocator> From<Box<T, A>> for Arc<T, A> {
3810    /// Move a boxed object to a new, reference-counted allocation.
3811    ///
3812    /// # Example
3813    ///
3814    /// ```
3815    /// # use std::sync::Arc;
3816    /// let unique: Box<str> = Box::from("eggplant");
3817    /// let shared: Arc<str> = Arc::from(unique);
3818    /// assert_eq!("eggplant", &shared[..]);
3819    /// ```
3820    #[inline]
3821    fn from(v: Box<T, A>) -> Arc<T, A> {
3822        Arc::from_box_in(v)
3823    }
3824}
3825
3826#[cfg(not(no_global_oom_handling))]
3827#[stable(feature = "shared_from_slice", since = "1.21.0")]
3828impl<T, A: Allocator + Clone> From<Vec<T, A>> for Arc<[T], A> {
3829    /// Allocates a reference-counted slice and moves `v`'s items into it.
3830    ///
3831    /// # Example
3832    ///
3833    /// ```
3834    /// # use std::sync::Arc;
3835    /// let unique: Vec<i32> = vec![1, 2, 3];
3836    /// let shared: Arc<[i32]> = Arc::from(unique);
3837    /// assert_eq!(&[1, 2, 3], &shared[..]);
3838    /// ```
3839    #[inline]
3840    fn from(v: Vec<T, A>) -> Arc<[T], A> {
3841        unsafe {
3842            let (vec_ptr, len, cap, alloc) = v.into_raw_parts_with_alloc();
3843
3844            let rc_ptr = Self::allocate_for_slice_in(len, &alloc);
3845            ptr::copy_nonoverlapping(vec_ptr, (&raw mut (*rc_ptr).data) as *mut T, len);
3846
3847            // Create a `Vec<T, &A>` with length 0, to deallocate the buffer
3848            // without dropping its contents or the allocator
3849            let _ = Vec::from_raw_parts_in(vec_ptr, 0, cap, &alloc);
3850
3851            Self::from_ptr_in(rc_ptr, alloc)
3852        }
3853    }
3854}
3855
3856#[stable(feature = "shared_from_cow", since = "1.45.0")]
3857impl<'a, B> From<Cow<'a, B>> for Arc<B>
3858where
3859    B: ToOwned + ?Sized,
3860    Arc<B>: From<&'a B> + From<B::Owned>,
3861{
3862    /// Creates an atomically reference-counted pointer from a clone-on-write
3863    /// pointer by copying its content.
3864    ///
3865    /// # Example
3866    ///
3867    /// ```rust
3868    /// # use std::sync::Arc;
3869    /// # use std::borrow::Cow;
3870    /// let cow: Cow<'_, str> = Cow::Borrowed("eggplant");
3871    /// let shared: Arc<str> = Arc::from(cow);
3872    /// assert_eq!("eggplant", &shared[..]);
3873    /// ```
3874    #[inline]
3875    fn from(cow: Cow<'a, B>) -> Arc<B> {
3876        match cow {
3877            Cow::Borrowed(s) => Arc::from(s),
3878            Cow::Owned(s) => Arc::from(s),
3879        }
3880    }
3881}
3882
3883#[stable(feature = "shared_from_str", since = "1.62.0")]
3884impl From<Arc<str>> for Arc<[u8]> {
3885    /// Converts an atomically reference-counted string slice into a byte slice.
3886    ///
3887    /// # Example
3888    ///
3889    /// ```
3890    /// # use std::sync::Arc;
3891    /// let string: Arc<str> = Arc::from("eggplant");
3892    /// let bytes: Arc<[u8]> = Arc::from(string);
3893    /// assert_eq!("eggplant".as_bytes(), bytes.as_ref());
3894    /// ```
3895    #[inline]
3896    fn from(rc: Arc<str>) -> Self {
3897        // SAFETY: `str` has the same layout as `[u8]`.
3898        unsafe { Arc::from_raw(Arc::into_raw(rc) as *const [u8]) }
3899    }
3900}
3901
3902#[stable(feature = "boxed_slice_try_from", since = "1.43.0")]
3903impl<T, A: Allocator, const N: usize> TryFrom<Arc<[T], A>> for Arc<[T; N], A> {
3904    type Error = Arc<[T], A>;
3905
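    /// Attempts to convert `Arc<[T]>` to `Arc<[T; N]>`, returning the original `Arc<[T]>`
    /// unchanged when its length is not exactly `N`. A small sketch:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let slice: Arc<[i32]> = Arc::from([1, 2, 3]);
    /// let array: Arc<[i32; 3]> = Arc::try_from(slice).unwrap();
    /// assert_eq!([1, 2, 3], *array);
    ///
    /// let slice: Arc<[i32]> = Arc::from([1, 2, 3]);
    /// assert!(Arc::<[i32; 4]>::try_from(slice).is_err());
    /// ```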
3906    fn try_from(boxed_slice: Arc<[T], A>) -> Result<Self, Self::Error> {
3907        if boxed_slice.len() == N {
3908            let (ptr, alloc) = Arc::into_inner_with_allocator(boxed_slice);
3909            Ok(unsafe { Arc::from_inner_in(ptr.cast(), alloc) })
3910        } else {
3911            Err(boxed_slice)
3912        }
3913    }
3914}
3915
3916#[cfg(not(no_global_oom_handling))]
3917#[stable(feature = "shared_from_iter", since = "1.37.0")]
3918impl<T> FromIterator<T> for Arc<[T]> {
3919    /// Takes each element in the `Iterator` and collects it into an `Arc<[T]>`.
3920    ///
3921    /// # Performance characteristics
3922    ///
3923    /// ## The general case
3924    ///
3925    /// In the general case, collecting into `Arc<[T]>` is done by first
3926    /// collecting into a `Vec<T>`. That is, when writing the following:
3927    ///
3928    /// ```rust
3929    /// # use std::sync::Arc;
3930    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0).collect();
3931    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
3932    /// ```
3933    ///
3934    /// this behaves as if we wrote:
3935    ///
3936    /// ```rust
3937    /// # use std::sync::Arc;
3938    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0)
3939    ///     .collect::<Vec<_>>() // The first set of allocations happens here.
3940    ///     .into(); // A second allocation for `Arc<[T]>` happens here.
3941    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
3942    /// ```
3943    ///
3944    /// This will allocate as many times as needed for constructing the `Vec<T>`
3945    /// and then it will allocate once for turning the `Vec<T>` into the `Arc<[T]>`.
3946    ///
3947    /// ## Iterators of known length
3948    ///
3949    /// When your `Iterator` implements `TrustedLen` and is of an exact size,
3950    /// a single allocation will be made for the `Arc<[T]>`. For example:
3951    ///
3952    /// ```rust
3953    /// # use std::sync::Arc;
3954    /// let evens: Arc<[u8]> = (0..10).collect(); // Just a single allocation happens here.
3955    /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
3956    /// ```
3957    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
3958        ToArcSlice::to_arc_slice(iter.into_iter())
3959    }
3960}
3961
3962#[cfg(not(no_global_oom_handling))]
3963/// Specialization trait used for collecting into `Arc<[T]>`.
3964trait ToArcSlice<T>: Iterator<Item = T> + Sized {
3965    fn to_arc_slice(self) -> Arc<[T]>;
3966}
3967
3968#[cfg(not(no_global_oom_handling))]
3969impl<T, I: Iterator<Item = T>> ToArcSlice<T> for I {
3970    default fn to_arc_slice(self) -> Arc<[T]> {
3971        self.collect::<Vec<T>>().into()
3972    }
3973}
3974
3975#[cfg(not(no_global_oom_handling))]
3976impl<T, I: iter::TrustedLen<Item = T>> ToArcSlice<T> for I {
3977    fn to_arc_slice(self) -> Arc<[T]> {
3978        // This is the case for a `TrustedLen` iterator.
3979        let (low, high) = self.size_hint();
3980        if let Some(high) = high {
3981            debug_assert_eq!(
3982                low,
3983                high,
3984                "TrustedLen iterator's size hint is not exact: {:?}",
3985                (low, high)
3986            );
3987
3988            unsafe {
                // SAFETY: We need to ensure that the iterator has an exact length,
                // and `TrustedLen` guarantees that `low` is that exact length
                // whenever the upper bound is `Some`.
3990                Arc::from_iter_exact(self, low)
3991            }
3992        } else {
3993            // TrustedLen contract guarantees that `upper_bound == None` implies an iterator
3994            // length exceeding `usize::MAX`.
            // The default implementation would collect into a `Vec`, which would panic.
            // Thus we panic here immediately without invoking `Vec` code.
3997            panic!("capacity overflow");
3998        }
3999    }
4000}
4001
4002#[stable(feature = "rust1", since = "1.0.0")]
4003impl<T: ?Sized, A: Allocator> borrow::Borrow<T> for Arc<T, A> {
4004    fn borrow(&self) -> &T {
4005        &**self
4006    }
4007}
4008
4009#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
4010impl<T: ?Sized, A: Allocator> AsRef<T> for Arc<T, A> {
4011    fn as_ref(&self) -> &T {
4012        &**self
4013    }
4014}
4015
4016#[stable(feature = "pin", since = "1.33.0")]
4017impl<T: ?Sized, A: Allocator> Unpin for Arc<T, A> {}
4018
4019/// Gets the offset within an `ArcInner` for the payload behind a pointer.
4020///
4021/// # Safety
4022///
4023/// The pointer must point to (and have valid metadata for) a previously
4024/// valid instance of T, but the T is allowed to be dropped.
4025unsafe fn data_offset<T: ?Sized>(ptr: *const T) -> usize {
    // Align the unsized value to the end of the ArcInner.
    // Because ArcInner is repr(C), the data field will always be the last field in memory.
4028    // SAFETY: since the only unsized types possible are slices, trait objects,
4029    // and extern types, the input safety requirement is currently enough to
4030    // satisfy the requirements of align_of_val_raw; this is an implementation
4031    // detail of the language that must not be relied upon outside of std.
4032    unsafe { data_offset_align(align_of_val_raw(ptr)) }
4033}
4034
4035#[inline]
4036fn data_offset_align(align: usize) -> usize {
4037    let layout = Layout::new::<ArcInner<()>>();
4038    layout.size() + layout.padding_needed_for(align)
4039}
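
// As a worked example (illustrative only; the real values come from `Layout` at compile
// time): on a 64-bit target, `ArcInner<()>` is two `AtomicUsize` counters, i.e. 16 bytes.
// A payload type with alignment 8 then lives at offset 16 (no padding needed), while a
// payload with alignment 32 lives at offset 32 (the 16-byte header plus 16 bytes of padding).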
4040
4041/// A unique owning pointer to an [`ArcInner`] **that does not imply the contents are initialized,**
4042/// but will deallocate it (without dropping the value) when dropped.
4043///
4044/// This is a helper for [`Arc::make_mut()`] to ensure correct cleanup on panic.
4045#[cfg(not(no_global_oom_handling))]
4046struct UniqueArcUninit<T: ?Sized, A: Allocator> {
4047    ptr: NonNull<ArcInner<T>>,
4048    layout_for_value: Layout,
4049    alloc: Option<A>,
4050}
4051
4052#[cfg(not(no_global_oom_handling))]
4053impl<T: ?Sized, A: Allocator> UniqueArcUninit<T, A> {
4054    /// Allocates an ArcInner with layout suitable to contain `for_value` or a clone of it.
4055    fn new(for_value: &T, alloc: A) -> UniqueArcUninit<T, A> {
4056        let layout = Layout::for_value(for_value);
4057        let ptr = unsafe {
4058            Arc::allocate_for_layout(
4059                layout,
4060                |layout_for_arcinner| alloc.allocate(layout_for_arcinner),
4061                |mem| mem.with_metadata_of(ptr::from_ref(for_value) as *const ArcInner<T>),
4062            )
4063        };
4064        Self { ptr: NonNull::new(ptr).unwrap(), layout_for_value: layout, alloc: Some(alloc) }
4065    }
4066
4067    /// Returns the pointer to be written into to initialize the [`Arc`].
4068    fn data_ptr(&mut self) -> *mut T {
4069        let offset = data_offset_align(self.layout_for_value.align());
4070        unsafe { self.ptr.as_ptr().byte_add(offset) as *mut T }
4071    }
4072
4073    /// Upgrade this into a normal [`Arc`].
4074    ///
4075    /// # Safety
4076    ///
4077    /// The data must have been initialized (by writing to [`Self::data_ptr()`]).
4078    unsafe fn into_arc(self) -> Arc<T, A> {
4079        let mut this = ManuallyDrop::new(self);
4080        let ptr = this.ptr.as_ptr();
4081        let alloc = this.alloc.take().unwrap();
4082
4083        // SAFETY: The pointer is valid as per `UniqueArcUninit::new`, and the caller is responsible
4084        // for having initialized the data.
4085        unsafe { Arc::from_ptr_in(ptr, alloc) }
4086    }
4087}
4088
4089#[cfg(not(no_global_oom_handling))]
4090impl<T: ?Sized, A: Allocator> Drop for UniqueArcUninit<T, A> {
4091    fn drop(&mut self) {
4092        // SAFETY:
4093        // * new() produced a pointer safe to deallocate.
4094        // * We own the pointer unless into_arc() was called, which forgets us.
4095        unsafe {
4096            self.alloc.take().unwrap().deallocate(
4097                self.ptr.cast(),
4098                arcinner_layout_for_value_layout(self.layout_for_value),
4099            );
4100        }
4101    }
4102}
4103
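/// Delegates [`core::error::Error`]'s methods to the inner value. A minimal sketch
/// (the `MyError` type here is a stand-in for any error type):
///
/// ```
/// use std::error::Error;
/// use std::fmt;
/// use std::sync::Arc;
///
/// #[derive(Debug)]
/// struct MyError;
///
/// impl fmt::Display for MyError {
///     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
///         write!(f, "my error")
///     }
/// }
///
/// impl Error for MyError {}
///
/// let err: Arc<dyn Error> = Arc::new(MyError);
/// assert!(err.source().is_none());
/// assert_eq!("my error", err.to_string());
/// ```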
4104#[stable(feature = "arc_error", since = "1.52.0")]
4105impl<T: core::error::Error + ?Sized> core::error::Error for Arc<T> {
4106    #[allow(deprecated)]
4107    fn cause(&self) -> Option<&dyn core::error::Error> {
4108        core::error::Error::cause(&**self)
4109    }
4110
4111    fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
4112        core::error::Error::source(&**self)
4113    }
4114
4115    fn provide<'a>(&'a self, req: &mut core::error::Request<'a>) {
4116        core::error::Error::provide(&**self, req);
4117    }
4118}
4119
4120/// A uniquely owned [`Arc`].
4121///
/// This represents an `Arc` that is known to be uniquely owned -- that is, to have exactly one
/// strong reference. Multiple weak pointers can be created, but attempts to upgrade those to strong
/// references will fail unless the `UniqueArc` they point to has been converted into a regular `Arc`.
4125///
4126/// Because it is uniquely owned, the contents of a `UniqueArc` can be freely mutated. A common
4127/// use case is to have an object be mutable during its initialization phase but then have it become
4128/// immutable and converted to a normal `Arc`.
4129///
4130/// This can be used as a flexible way to create cyclic data structures, as in the example below.
4131///
4132/// ```
4133/// #![feature(unique_rc_arc)]
4134/// use std::sync::{Arc, Weak, UniqueArc};
4135///
4136/// struct Gadget {
4137///     me: Weak<Gadget>,
4138/// }
4139///
4140/// fn create_gadget() -> Option<Arc<Gadget>> {
4141///     let mut rc = UniqueArc::new(Gadget {
4142///         me: Weak::new(),
4143///     });
4144///     rc.me = UniqueArc::downgrade(&rc);
4145///     Some(UniqueArc::into_arc(rc))
4146/// }
4147///
4148/// create_gadget().unwrap();
4149/// ```
4150///
4151/// An advantage of using `UniqueArc` over [`Arc::new_cyclic`] to build cyclic data structures is that
4152/// [`Arc::new_cyclic`]'s `data_fn` parameter cannot be async or return a [`Result`]. As shown in the
4153/// previous example, `UniqueArc` allows for more flexibility in the construction of cyclic data,
4154/// including fallible or async constructors.
4155#[unstable(feature = "unique_rc_arc", issue = "112566")]
4156pub struct UniqueArc<
4157    T: ?Sized,
4158    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
4159> {
4160    ptr: NonNull<ArcInner<T>>,
4161    // Define the ownership of `ArcInner<T>` for drop-check
4162    _marker: PhantomData<ArcInner<T>>,
4163    // Invariance is necessary for soundness: once other `Weak`
4164    // references exist, we already have a form of shared mutability!
4165    _marker2: PhantomData<*mut T>,
4166    alloc: A,
4167}
4168
4169#[unstable(feature = "unique_rc_arc", issue = "112566")]
4170unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Send> Send for UniqueArc<T, A> {}
4171
4172#[unstable(feature = "unique_rc_arc", issue = "112566")]
4173unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Sync> Sync for UniqueArc<T, A> {}
4174
4175#[unstable(feature = "unique_rc_arc", issue = "112566")]
4176// #[unstable(feature = "coerce_unsized", issue = "18598")]
4177impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<UniqueArc<U, A>>
4178    for UniqueArc<T, A>
4179{
4180}
4181
4182//#[unstable(feature = "unique_rc_arc", issue = "112566")]
4183#[unstable(feature = "dispatch_from_dyn", issue = "none")]
4184impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<UniqueArc<U>> for UniqueArc<T> {}
4185
4186#[unstable(feature = "unique_rc_arc", issue = "112566")]
4187impl<T: ?Sized + fmt::Display, A: Allocator> fmt::Display for UniqueArc<T, A> {
4188    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
4189        fmt::Display::fmt(&**self, f)
4190    }
4191}
4192
4193#[unstable(feature = "unique_rc_arc", issue = "112566")]
4194impl<T: ?Sized + fmt::Debug, A: Allocator> fmt::Debug for UniqueArc<T, A> {
4195    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
4196        fmt::Debug::fmt(&**self, f)
4197    }
4198}
4199
4200#[unstable(feature = "unique_rc_arc", issue = "112566")]
4201impl<T: ?Sized, A: Allocator> fmt::Pointer for UniqueArc<T, A> {
4202    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
4203        fmt::Pointer::fmt(&(&raw const **self), f)
4204    }
4205}
4206
4207#[unstable(feature = "unique_rc_arc", issue = "112566")]
4208impl<T: ?Sized, A: Allocator> borrow::Borrow<T> for UniqueArc<T, A> {
4209    fn borrow(&self) -> &T {
4210        &**self
4211    }
4212}
4213
4214#[unstable(feature = "unique_rc_arc", issue = "112566")]
4215impl<T: ?Sized, A: Allocator> borrow::BorrowMut<T> for UniqueArc<T, A> {
4216    fn borrow_mut(&mut self) -> &mut T {
4217        &mut **self
4218    }
4219}
4220
4221#[unstable(feature = "unique_rc_arc", issue = "112566")]
4222impl<T: ?Sized, A: Allocator> AsRef<T> for UniqueArc<T, A> {
4223    fn as_ref(&self) -> &T {
4224        &**self
4225    }
4226}
4227
4228#[unstable(feature = "unique_rc_arc", issue = "112566")]
4229impl<T: ?Sized, A: Allocator> AsMut<T> for UniqueArc<T, A> {
4230    fn as_mut(&mut self) -> &mut T {
4231        &mut **self
4232    }
4233}
4234
4235#[unstable(feature = "unique_rc_arc", issue = "112566")]
4236impl<T: ?Sized, A: Allocator> Unpin for UniqueArc<T, A> {}
4237
4238#[unstable(feature = "unique_rc_arc", issue = "112566")]
4239impl<T: ?Sized + PartialEq, A: Allocator> PartialEq for UniqueArc<T, A> {
4240    /// Equality for two `UniqueArc`s.
4241    ///
    /// Two `UniqueArc`s are equal if their inner values are equal.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert!(five == UniqueArc::new(5));
    /// ```
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        PartialEq::eq(&**self, &**other)
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + PartialOrd, A: Allocator> PartialOrd for UniqueArc<T, A> {
    /// Partial comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `partial_cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    /// use std::cmp::Ordering;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&UniqueArc::new(6)));
    /// ```
    #[inline(always)]
    fn partial_cmp(&self, other: &UniqueArc<T, A>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }

    /// Less-than comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `<` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert!(five < UniqueArc::new(6));
    /// ```
    #[inline(always)]
    fn lt(&self, other: &UniqueArc<T, A>) -> bool {
        **self < **other
    }

    /// 'Less than or equal to' comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `<=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert!(five <= UniqueArc::new(5));
    /// ```
    #[inline(always)]
    fn le(&self, other: &UniqueArc<T, A>) -> bool {
        **self <= **other
    }

    /// Greater-than comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `>` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert!(five > UniqueArc::new(4));
    /// ```
    #[inline(always)]
    fn gt(&self, other: &UniqueArc<T, A>) -> bool {
        **self > **other
    }

    /// 'Greater than or equal to' comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `>=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert!(five >= UniqueArc::new(5));
    /// ```
    #[inline(always)]
    fn ge(&self, other: &UniqueArc<T, A>) -> bool {
        **self >= **other
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + Ord, A: Allocator> Ord for UniqueArc<T, A> {
    /// Comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    /// use std::cmp::Ordering;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert_eq!(Ordering::Less, five.cmp(&UniqueArc::new(6)));
    /// ```
    #[inline]
    fn cmp(&self, other: &UniqueArc<T, A>) -> Ordering {
        (**self).cmp(&**other)
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + Eq, A: Allocator> Eq for UniqueArc<T, A> {}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + Hash, A: Allocator> Hash for UniqueArc<T, A> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        (**self).hash(state);
    }
}

impl<T> UniqueArc<T, Global> {
    /// Creates a new `UniqueArc`.
    ///
    /// Weak references to this `UniqueArc` can be created with [`UniqueArc::downgrade`]. Upgrading
    /// these weak references will fail until the `UniqueArc` has been converted into an [`Arc`];
    /// after the conversion, any weak references created beforehand will point to the new [`Arc`].
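    ///
    /// # Examples
    ///
    /// A minimal sketch: because `UniqueArc` implements `DerefMut`, the inner value can be
    /// mutated in place before the pointer is shared.
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let mut five = UniqueArc::new(5);
    /// // No other strong reference can exist yet, so mutating in place is fine.
    /// *five += 1;
    /// assert_eq!(*five, 6);
    /// ```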
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "unique_rc_arc", issue = "112566")]
    #[must_use]
    pub fn new(value: T) -> Self {
        Self::new_in(value, Global)
    }
}

impl<T, A: Allocator> UniqueArc<T, A> {
    /// Creates a new `UniqueArc` in the provided allocator.
    ///
    /// Weak references to this `UniqueArc` can be created with [`UniqueArc::downgrade`]. Upgrading
    /// these weak references will fail until the `UniqueArc` has been converted into an [`Arc`];
    /// after the conversion, any weak references created beforehand will point to the new [`Arc`].
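    ///
    /// # Examples
    ///
    /// A minimal sketch using the global allocator explicitly; the `allocator_api` nightly
    /// feature is assumed in addition to `unique_rc_arc`.
    ///
    /// ```
    /// #![feature(unique_rc_arc, allocator_api)]
    /// use std::alloc::Global;
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new_in(5, Global);
    /// assert_eq!(*five, 5);
    /// ```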
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "unique_rc_arc", issue = "112566")]
    #[must_use]
    // #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn new_in(data: T, alloc: A) -> Self {
        let (ptr, alloc) = Box::into_unique(Box::new_in(
            ArcInner {
                strong: atomic::AtomicUsize::new(0),
                // Keep one implicit weak reference so that the allocation stays valid even if
                // all the weak pointers created from this `UniqueArc` are dropped.
                weak: atomic::AtomicUsize::new(1),
                data,
            },
            alloc,
        ));
        Self { ptr: ptr.into(), _marker: PhantomData, _marker2: PhantomData, alloc }
    }
}

impl<T: ?Sized, A: Allocator> UniqueArc<T, A> {
    /// Converts the `UniqueArc` into a regular [`Arc`].
    ///
    /// This consumes the `UniqueArc` and returns a regular [`Arc`] that contains the inner value
    /// previously held by the `UniqueArc`.
    ///
    /// Any weak references created before this method is called can be upgraded to strong
    /// references afterwards.
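    ///
    /// # Examples
    ///
    /// A minimal sketch of the conversion:
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::{Arc, UniqueArc};
    ///
    /// let unique = UniqueArc::new(5);
    /// let shared: Arc<i32> = UniqueArc::into_arc(unique);
    /// assert_eq!(*shared, 5);
    /// ```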
    #[unstable(feature = "unique_rc_arc", issue = "112566")]
    #[must_use]
    pub fn into_arc(this: Self) -> Arc<T, A> {
        let this = ManuallyDrop::new(this);

        // Move the allocator out.
        // SAFETY: `this.alloc` will not be accessed again, nor dropped because it is in
        // a `ManuallyDrop`.
        let alloc: A = unsafe { ptr::read(&this.alloc) };

        // SAFETY: This pointer was allocated at creation time so we know it is valid.
        unsafe {
            // Convert our weak reference into a strong reference
            (*this.ptr.as_ptr()).strong.store(1, Release);
            Arc::from_inner_in(this.ptr, alloc)
        }
    }
}

impl<T: ?Sized, A: Allocator + Clone> UniqueArc<T, A> {
    /// Creates a new weak reference to the `UniqueArc`.
    ///
    /// Attempting to upgrade this weak reference will fail until the `UniqueArc` has been
    /// converted into an [`Arc`] using [`UniqueArc::into_arc`].
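    ///
    /// # Examples
    ///
    /// A minimal sketch: upgrading fails while the `UniqueArc` still exists and succeeds once it
    /// has been converted into an [`Arc`].
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let unique = UniqueArc::new(5);
    /// let weak = UniqueArc::downgrade(&unique);
    /// // The conversion has not happened yet, so the upgrade fails.
    /// assert!(weak.upgrade().is_none());
    ///
    /// let shared = UniqueArc::into_arc(unique);
    /// // The weak reference now points to the new `Arc` and can be upgraded.
    /// assert_eq!(weak.upgrade().map(|arc| *arc), Some(5));
    /// assert_eq!(*shared, 5);
    /// ```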
    #[unstable(feature = "unique_rc_arc", issue = "112566")]
    #[must_use]
    pub fn downgrade(this: &Self) -> Weak<T, A> {
        // Using a relaxed ordering is alright here, as knowledge of the
        // original reference prevents other threads from erroneously deleting
        // the object or converting the object to a normal `Arc<T, A>`.
        //
        // Note that we don't need to test if the weak counter is locked because there
        // are no such operations like `Arc::get_mut` or `Arc::make_mut` that will lock
        // the weak counter.
        //
        // SAFETY: This pointer was allocated at creation time so we know it is valid.
        let old_size = unsafe { (*this.ptr.as_ptr()).weak.fetch_add(1, Relaxed) };

        // See comments in Arc::clone() for why we do this (for mem::forget).
        if old_size > MAX_REFCOUNT {
            abort();
        }

        Weak { ptr: this.ptr, alloc: this.alloc.clone() }
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized, A: Allocator> Deref for UniqueArc<T, A> {
    type Target = T;

    fn deref(&self) -> &T {
        // SAFETY: This pointer was allocated at creation time so we know it is valid.
        unsafe { &self.ptr.as_ref().data }
    }
}

// #[unstable(feature = "unique_rc_arc", issue = "112566")]
#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")]
unsafe impl<T: ?Sized> PinCoerceUnsized for UniqueArc<T> {}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized, A: Allocator> DerefMut for UniqueArc<T, A> {
    fn deref_mut(&mut self) -> &mut T {
        // SAFETY: This pointer was allocated at creation time so we know it is valid. We know we
        // have unique ownership and therefore it's safe to make a mutable reference because
        // `UniqueArc` owns the only strong reference to itself.
        // We also need to be careful to only create a mutable reference to the `data` field,
        // as a mutable reference to the entire `ArcInner` would assert uniqueness over the
        // ref count fields too, invalidating any attempt by `Weak`s to access the ref count.
        unsafe { &mut (*self.ptr.as_ptr()).data }
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
// #[unstable(feature = "deref_pure_trait", issue = "87121")]
unsafe impl<T: ?Sized, A: Allocator> DerefPure for UniqueArc<T, A> {}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for UniqueArc<T, A> {
    fn drop(&mut self) {
        // See `Arc::drop_slow` which drops an `Arc` with a strong count of 0.
        // SAFETY: This pointer was allocated at creation time so we know it is valid.
        let _weak = Weak { ptr: self.ptr, alloc: &self.alloc };

        unsafe { ptr::drop_in_place(&mut (*self.ptr.as_ptr()).data) };
    }
}