Auto merge of #155029 - jhpratt:rollup-n6DjhvP, r=jhpratt

Rollup of 3 pull requests

Successful merges:

 - rust-lang/rust#153038 (core::sync: rename `Exclusive` to `SyncView` and make improvements)
 - rust-lang/rust#154955 (Fix pattern types rendering in rustdoc)
 - rust-lang/rust#155026 (Move `maybe_loop_headers` out of `rustc_middle`.)
This commit is contained in:
bors
2026-04-09 06:31:38 +00:00
13 changed files with 475 additions and 368 deletions
-29
View File
@@ -1,29 +0,0 @@
use rustc_index::bit_set::DenseBitSet;
use super::*;
/// Compute the set of loop headers in the given body. A loop header is usually defined as a block
/// which dominates one of its predecessors. This definition is only correct for reducible CFGs.
/// However, computing dominators is expensive, so we approximate according to the post-order
/// traversal order. A loop header for us is a block which is visited after its predecessor in
/// post-order. This is ok as we mostly need a heuristic.
/// Approximate the set of loop headers in `body`.
///
/// A loop header is conventionally defined as a block that dominates one of
/// its predecessors, but that definition is only correct for reducible CFGs
/// and computing dominators is expensive. We therefore approximate using the
/// post-order traversal: any edge whose target has not been visited yet is
/// treated as a back edge, and its target as a loop header. Callers only need
/// a heuristic, so this over-approximation is acceptable.
pub fn maybe_loop_headers(body: &Body<'_>) -> DenseBitSet<BasicBlock> {
    let num_blocks = body.basic_blocks.len();
    let mut headers = DenseBitSet::new_empty(num_blocks);
    let mut seen = DenseBitSet::new_empty(num_blocks);
    for (block, data) in traversal::postorder(body) {
        // In post-order, an acyclic CFG visits every successor before the
        // block itself; a successor that is still unseen therefore marks a
        // back edge into a (potential) loop header.
        for target in data.terminator().successors() {
            if !seen.contains(target) {
                headers.insert(target);
            }
        }
        // Only record `block` after scanning its successors, so that a
        // self-loop (`bb1: goto -> bb1;`) is still detected.
        let first_visit = seen.insert(block);
        debug_assert!(first_visit);
    }
    headers
}
-2
View File
@@ -48,8 +48,6 @@
mod statement;
mod syntax;
mod terminator;
pub mod loops;
pub mod traversal;
pub mod visit;
@@ -97,7 +97,7 @@ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
ecx: InterpCx::new(tcx, DUMMY_SP, typing_env, DummyMachine),
body,
map: Map::new(tcx, body, PlaceCollectionMode::OnDemand),
maybe_loop_headers: loops::maybe_loop_headers(body),
maybe_loop_headers: maybe_loop_headers(body),
entry_states: IndexVec::from_elem(ConditionSet::default(), &body.basic_blocks),
};
@@ -1100,3 +1100,29 @@ fn apply_chain(
Some(new_target)
}
}
/// Compute the set of loop headers in the given body. A loop header is usually defined as a block
/// which dominates one of its predecessors. This definition is only correct for reducible CFGs.
/// However, computing dominators is expensive, so we approximate according to the post-order
/// traversal order. A loop header for us is a block which is visited after its predecessor in
/// post-order. This is ok as we mostly need a heuristic.
/// Heuristically identify the loop headers of `body`.
///
/// The textbook definition — a block that dominates one of its predecessors —
/// only holds for reducible CFGs, and dominator computation is costly. We
/// instead rely on the post-order traversal order: a block reached through an
/// edge whose target has not been visited yet is assumed to be a loop header.
/// Since we only need a heuristic, this approximation is good enough.
fn maybe_loop_headers(body: &Body<'_>) -> DenseBitSet<BasicBlock> {
    let block_count = body.basic_blocks.len();
    let mut headers = DenseBitSet::new_empty(block_count);
    let mut done = DenseBitSet::new_empty(block_count);
    for (bb, data) in traversal::postorder(body) {
        // Acyclic parts of the CFG have all successors visited before the
        // block itself in post-order, so an unvisited successor signals a
        // back edge into a possible loop header.
        for succ in data.terminator().successors() {
            if !done.contains(succ) {
                headers.insert(succ);
            }
        }
        // Mark `bb` visited only after inspecting its successors, in case of
        // a self-loop (`bb1: goto -> bb1;`).
        let freshly_inserted = done.insert(bb);
        debug_assert!(freshly_inserted);
    }
    headers
}
-322
View File
@@ -1,322 +0,0 @@
//! Defines [`Exclusive`].

use core::clone::TrivialClone;
use core::cmp::Ordering;
use core::fmt;
use core::future::Future;
use core::hash::{Hash, Hasher};
use core::marker::{StructuralPartialEq, Tuple};
use core::ops::{Coroutine, CoroutineState};
use core::pin::Pin;
use core::task::{Context, Poll};

/// `Exclusive` provides _mutable_ access, also referred to as _exclusive_
/// access to the underlying value. However, it only permits _immutable_, or _shared_
/// access to the underlying value when that value is [`Sync`].
///
/// While this may seem not very useful, it allows `Exclusive` to _unconditionally_
/// implement `Sync`. Indeed, the safety requirements of `Sync` state that for `Exclusive`
/// to be `Sync`, it must be sound to _share_ across threads, that is, it must be sound
/// for `&Exclusive` to cross thread boundaries. By design, a `&Exclusive<T>` for non-`Sync` T
/// has no API whatsoever, making it useless, thus harmless, thus memory safe.
///
/// Certain constructs like [`Future`]s can only be used with _exclusive_ access,
/// and are often `Send` but not `Sync`, so `Exclusive` can be used as a hint to the
/// Rust compiler that something is `Sync` in practice.
///
/// ## Examples
///
/// Using a non-`Sync` future prevents the wrapping struct from being `Sync`:
///
/// ```compile_fail
/// use core::cell::Cell;
///
/// async fn other() {}
/// fn assert_sync<T: Sync>(t: T) {}
/// struct State<F> {
///     future: F
/// }
///
/// assert_sync(State {
///     future: async {
///         let cell = Cell::new(1);
///         let cell_ref = &cell;
///         other().await;
///         let value = cell_ref.get();
///     }
/// });
/// ```
///
/// `Exclusive` ensures the struct is `Sync` without stripping the future of its
/// functionality:
///
/// ```
/// #![feature(exclusive_wrapper)]
/// use core::cell::Cell;
/// use core::sync::Exclusive;
///
/// async fn other() {}
/// fn assert_sync<T: Sync>(t: T) {}
/// struct State<F> {
///     future: Exclusive<F>
/// }
///
/// assert_sync(State {
///     future: Exclusive::new(async {
///         let cell = Cell::new(1);
///         let cell_ref = &cell;
///         other().await;
///         let value = cell_ref.get();
///     })
/// });
/// ```
///
/// ## Parallels with a mutex
///
/// In some sense, `Exclusive` can be thought of as a _compile-time_ version of
/// a mutex, as the borrow-checker guarantees that only one `&mut` can exist
/// for any value. This is a parallel with the fact that
/// `&` and `&mut` references together can be thought of as a _compile-time_
/// version of a read-write lock.
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
#[doc(alias = "SyncWrapper")]
#[doc(alias = "SyncCell")]
#[doc(alias = "Unique")]
// `Exclusive` can't have derived `PartialOrd`, `Clone`, etc. impls as they would
// use `&` access to the inner value, violating the `Sync` impl's safety
// requirements.
#[derive(Default)]
#[repr(transparent)]
pub struct Exclusive<T: ?Sized> {
    inner: T,
}

// See `Exclusive`'s docs for justification.
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
unsafe impl<T: ?Sized> Sync for Exclusive<T> {}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<T: ?Sized> fmt::Debug for Exclusive<T> {
    // Deliberately does not print the inner value: `fmt` only has `&self`,
    // and reading the inner value through a shared reference would violate
    // the safety argument of the unconditional `Sync` impl above.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        f.debug_struct("Exclusive").finish_non_exhaustive()
    }
}

impl<T: Sized> Exclusive<T> {
    /// Wrap a value in an `Exclusive`.
    #[unstable(feature = "exclusive_wrapper", issue = "98407")]
    #[must_use]
    #[inline]
    pub const fn new(t: T) -> Self {
        Self { inner: t }
    }

    /// Unwrap the value contained in the `Exclusive`.
    #[unstable(feature = "exclusive_wrapper", issue = "98407")]
    #[rustc_const_unstable(feature = "exclusive_wrapper", issue = "98407")]
    #[must_use]
    #[inline]
    pub const fn into_inner(self) -> T {
        self.inner
    }
}

impl<T: ?Sized> Exclusive<T> {
    /// Gets exclusive access to the underlying value.
    #[unstable(feature = "exclusive_wrapper", issue = "98407")]
    #[must_use]
    #[inline]
    pub const fn get_mut(&mut self) -> &mut T {
        &mut self.inner
    }

    /// Gets pinned exclusive access to the underlying value.
    ///
    /// `Exclusive` is considered to _structurally pin_ the underlying
    /// value, which means _unpinned_ `Exclusive`s can produce _unpinned_
    /// access to the underlying value, but _pinned_ `Exclusive`s only
    /// produce _pinned_ access to the underlying value.
    #[unstable(feature = "exclusive_wrapper", issue = "98407")]
    #[must_use]
    #[inline]
    pub const fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut T> {
        // SAFETY: `Exclusive` can only produce `&mut T` if itself is unpinned
        // `Pin::map_unchecked_mut` is not const, so we do this conversion manually
        unsafe { Pin::new_unchecked(&mut self.get_unchecked_mut().inner) }
    }

    /// Build a _mutable_ reference to an `Exclusive<T>` from
    /// a _mutable_ reference to a `T`. This allows you to skip
    /// building an `Exclusive` with [`Exclusive::new`].
    #[unstable(feature = "exclusive_wrapper", issue = "98407")]
    #[must_use]
    #[inline]
    pub const fn from_mut(r: &'_ mut T) -> &'_ mut Exclusive<T> {
        // SAFETY: `Exclusive<T>` is `#[repr(transparent)]`, so `&mut T` and
        // `&mut Exclusive<T>` have the same layout; and `Exclusive` properties
        // are `&mut`-agnostic
        unsafe { &mut *(r as *mut T as *mut Exclusive<T>) }
    }

    /// Build a _pinned mutable_ reference to an `Exclusive<T>` from
    /// a _pinned mutable_ reference to a `T`. This allows you to skip
    /// building an `Exclusive` with [`Exclusive::new`].
    #[unstable(feature = "exclusive_wrapper", issue = "98407")]
    #[must_use]
    #[inline]
    pub const fn from_pin_mut(r: Pin<&'_ mut T>) -> Pin<&'_ mut Exclusive<T>> {
        // SAFETY: `Exclusive` can only produce `&mut T` if itself is unpinned
        // `Pin::map_unchecked_mut` is not const, so we do this conversion manually
        unsafe { Pin::new_unchecked(Self::from_mut(r.get_unchecked_mut())) }
    }
}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
#[rustc_const_unstable(feature = "const_convert", issue = "143773")]
impl<T> const From<T> for Exclusive<T> {
    #[inline]
    fn from(t: T) -> Self {
        Self::new(t)
    }
}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<F, Args> FnOnce<Args> for Exclusive<F>
where
    F: FnOnce<Args>,
    Args: Tuple,
{
    type Output = F::Output;

    // Consumes `self`, so no `Sync` bound on `F` is needed.
    extern "rust-call" fn call_once(self, args: Args) -> Self::Output {
        self.into_inner().call_once(args)
    }
}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<F, Args> FnMut<Args> for Exclusive<F>
where
    F: FnMut<Args>,
    Args: Tuple,
{
    // Goes through `&mut self`, which `Exclusive` always permits.
    extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output {
        self.get_mut().call_mut(args)
    }
}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<F, Args> Fn<Args> for Exclusive<F>
where
    F: Sync + Fn<Args>,
    Args: Tuple,
{
    // `call` only has `&self`, so shared access requires `F: Sync`
    // (see the `as_ref` used here, which is bounded on `T: Sync`).
    extern "rust-call" fn call(&self, args: Args) -> Self::Output {
        self.as_ref().call(args)
    }
}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<T> Future for Exclusive<T>
where
    T: Future + ?Sized,
{
    type Output = T::Output;

    #[inline]
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        self.get_pin_mut().poll(cx)
    }
}

#[unstable(feature = "coroutine_trait", issue = "43122")] // also #98407
impl<R, G> Coroutine<R> for Exclusive<G>
where
    G: Coroutine<R> + ?Sized,
{
    type Yield = G::Yield;
    type Return = G::Return;

    #[inline]
    fn resume(self: Pin<&mut Self>, arg: R) -> CoroutineState<Self::Yield, Self::Return> {
        G::resume(self.get_pin_mut(), arg)
    }
}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<T> AsRef<T> for Exclusive<T>
where
    T: Sync + ?Sized,
{
    // Shared access is only sound when `T: Sync`; see the type docs.
    #[inline]
    fn as_ref(&self) -> &T {
        &self.inner
    }
}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<T> Clone for Exclusive<T>
where
    T: Sync + Clone,
{
    // Cloning reads the inner value through `&self`, hence the `Sync` bound.
    #[inline]
    fn clone(&self) -> Self {
        Self { inner: self.inner.clone() }
    }
}

#[doc(hidden)]
#[unstable(feature = "trivial_clone", issue = "none")]
unsafe impl<T> TrivialClone for Exclusive<T> where T: Sync + TrivialClone {}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<T> Copy for Exclusive<T> where T: Sync + Copy {}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<T, U> PartialEq<Exclusive<U>> for Exclusive<T>
where
    T: Sync + PartialEq<U> + ?Sized,
    U: Sync + ?Sized,
{
    // Comparison reads both inner values through `&`, hence the `Sync` bounds.
    #[inline]
    fn eq(&self, other: &Exclusive<U>) -> bool {
        self.inner == other.inner
    }
}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<T> StructuralPartialEq for Exclusive<T> where T: Sync + StructuralPartialEq + ?Sized {}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<T> Eq for Exclusive<T> where T: Sync + Eq + ?Sized {}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<T> Hash for Exclusive<T>
where
    T: Sync + Hash + ?Sized,
{
    #[inline]
    fn hash<H: Hasher>(&self, state: &mut H) {
        Hash::hash(&self.inner, state)
    }
}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<T, U> PartialOrd<Exclusive<U>> for Exclusive<T>
where
    T: Sync + PartialOrd<U> + ?Sized,
    U: Sync + ?Sized,
{
    #[inline]
    fn partial_cmp(&self, other: &Exclusive<U>) -> Option<Ordering> {
        self.inner.partial_cmp(&other.inner)
    }
}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<T> Ord for Exclusive<T>
where
    T: Sync + Ord + ?Sized,
{
    #[inline]
    fn cmp(&self, other: &Self) -> Ordering {
        self.inner.cmp(&other.inner)
    }
}
+2 -2
View File
@@ -3,6 +3,6 @@
#![stable(feature = "rust1", since = "1.0.0")]
pub mod atomic;
mod exclusive;
mod sync_view;
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
pub use exclusive::Exclusive;
pub use sync_view::SyncView;
+414
View File
@@ -0,0 +1,414 @@
//! Defines [`SyncView`].

use core::clone::TrivialClone;
use core::cmp::Ordering;
use core::fmt;
use core::future::Future;
use core::hash::{Hash, Hasher};
use core::marker::{StructuralPartialEq, Tuple};
use core::ops::{Coroutine, CoroutineState};
use core::pin::Pin;
use core::task::{Context, Poll};

/// `SyncView` provides _mutable_ access, also referred to as _exclusive_
/// access to the underlying value. However, it only permits _immutable_, or _shared_
/// access to the underlying value when that value is [`Sync`].
///
/// While this may seem not very useful, it allows `SyncView` to _unconditionally_
/// implement `Sync`. Indeed, the safety requirements of `Sync` state that for `SyncView`
/// to be `Sync`, it must be sound to _share_ across threads, that is, it must be sound
/// for `&SyncView` to cross thread boundaries. By design, a `&SyncView<T>` for non-`Sync`
/// `T` has no API whatsoever, making it useless, thus harmless, thus memory safe.
///
/// Certain constructs like [`Future`]s can only be used with _exclusive_ access,
/// and are often [`Send`] but not `Sync`, so `SyncView` can be used as a hint to the
/// Rust compiler that something is `Sync` in practice.
///
/// ## Examples
///
/// Using a non-`Sync` future prevents the wrapping struct from being `Sync`:
///
/// ```compile_fail
/// use core::cell::Cell;
///
/// async fn other() {}
/// fn assert_sync<T: Sync>(t: T) {}
/// struct State<F> {
///     future: F
/// }
///
/// assert_sync(State {
///     future: async {
///         let cell = Cell::new(1);
///         let cell_ref = &cell;
///         other().await;
///         let value = cell_ref.get();
///     }
/// });
/// ```
///
/// `SyncView` ensures the struct is `Sync` without stripping the future of its
/// functionality:
///
/// ```
/// #![feature(exclusive_wrapper)]
/// use core::cell::Cell;
/// use core::sync::SyncView;
///
/// async fn other() {}
/// fn assert_sync<T: Sync>(t: T) {}
/// struct State<F> {
///     future: SyncView<F>
/// }
///
/// assert_sync(State {
///     future: SyncView::new(async {
///         let cell = Cell::new(1);
///         let cell_ref = &cell;
///         other().await;
///         let value = cell_ref.get();
///     })
/// });
/// ```
///
/// ## Parallels with a mutex
///
/// In some sense, `SyncView` can be thought of as a _compile-time_ version of
/// a mutex, as the borrow-checker guarantees that only one `&mut` can exist
/// for any value. This is a parallel with the fact that
/// `&` and `&mut` references together can be thought of as a _compile-time_
/// version of a read-write lock.
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
#[doc(alias = "SyncWrapper")]
#[doc(alias = "SyncCell")]
#[doc(alias = "Unique")]
#[doc(alias = "Exclusive")]
// `SyncView` can't have derived `PartialOrd`, `Clone`, etc. impls as they would
// use `&` access to the inner value, violating the `Sync` impl's safety
// requirements.
#[repr(transparent)]
pub struct SyncView<T: ?Sized> {
    inner: T,
}

// See `SyncView`'s docs for justification.
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
unsafe impl<T: ?Sized> Sync for SyncView<T> {}

// Manual (const) impl rather than `#[derive(Default)]`, so `Default` can be
// used in const contexts.
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
#[rustc_const_unstable(feature = "const_default", issue = "143894")]
impl<T> const Default for SyncView<T>
where
    T: [const] Default,
{
    #[inline]
    fn default() -> Self {
        Self { inner: Default::default() }
    }
}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<T: ?Sized> fmt::Debug for SyncView<T> {
    // Deliberately does not print the inner value: `fmt` only has `&self`,
    // and reading the inner value through a shared reference would violate
    // the safety argument of the unconditional `Sync` impl above.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        f.debug_struct("SyncView").finish_non_exhaustive()
    }
}

impl<T: Sized> SyncView<T> {
    /// Wrap a value in a `SyncView`.
    #[unstable(feature = "exclusive_wrapper", issue = "98407")]
    #[rustc_const_unstable(feature = "exclusive_wrapper", issue = "98407")]
    #[must_use]
    #[inline]
    pub const fn new(t: T) -> Self {
        Self { inner: t }
    }

    /// Unwrap the value contained in the `SyncView`.
    #[unstable(feature = "exclusive_wrapper", issue = "98407")]
    #[rustc_const_unstable(feature = "exclusive_wrapper", issue = "98407")]
    #[must_use]
    #[inline]
    pub const fn into_inner(self) -> T {
        self.inner
    }
}

impl<T: ?Sized> SyncView<T> {
    /// Gets pinned exclusive access to the underlying value.
    ///
    /// `SyncView` is considered to _structurally pin_ the underlying
    /// value, which means _unpinned_ `SyncView`s can produce _unpinned_
    /// access to the underlying value, but _pinned_ `SyncView`s only
    /// produce _pinned_ access to the underlying value.
    #[unstable(feature = "exclusive_wrapper", issue = "98407")]
    #[rustc_const_unstable(feature = "exclusive_wrapper", issue = "98407")]
    #[must_use]
    #[inline]
    pub const fn as_pin_mut(self: Pin<&mut Self>) -> Pin<&mut T> {
        // SAFETY: `SyncView` can only produce `&mut T` if itself is unpinned
        // `Pin::map_unchecked_mut` is not const, so we do this conversion manually
        unsafe { Pin::new_unchecked(&mut self.get_unchecked_mut().inner) }
    }

    /// Build a _mutable_ reference to a `SyncView<T>` from
    /// a _mutable_ reference to a `T`. This allows you to skip
    /// building a `SyncView` with [`SyncView::new`].
    #[unstable(feature = "exclusive_wrapper", issue = "98407")]
    #[rustc_const_unstable(feature = "exclusive_wrapper", issue = "98407")]
    #[must_use]
    #[inline]
    pub const fn from_mut(r: &'_ mut T) -> &'_ mut SyncView<T> {
        // SAFETY: `SyncView<T>` is `#[repr(transparent)]`, so `&mut T` and
        // `&mut SyncView<T>` have the same layout; and `SyncView` properties
        // are `&mut`-agnostic
        unsafe { &mut *(r as *mut T as *mut SyncView<T>) }
    }

    /// Build a _pinned mutable_ reference to a `SyncView<T>` from
    /// a _pinned mutable_ reference to a `T`. This allows you to skip
    /// building a `SyncView` with [`SyncView::new`].
    #[unstable(feature = "exclusive_wrapper", issue = "98407")]
    #[rustc_const_unstable(feature = "exclusive_wrapper", issue = "98407")]
    #[must_use]
    #[inline]
    pub const fn from_pin_mut(r: Pin<&'_ mut T>) -> Pin<&'_ mut SyncView<T>> {
        // SAFETY: `SyncView` can only produce `&mut T` if itself is unpinned
        // `Pin::map_unchecked_mut` is not const, so we do this conversion manually
        unsafe { Pin::new_unchecked(Self::from_mut(r.get_unchecked_mut())) }
    }
}

impl<T: ?Sized + Sync> SyncView<T> {
    /// Gets pinned shared access to the underlying value.
    ///
    /// `SyncView` is considered to _structurally pin_ the underlying
    /// value, which means _unpinned_ `SyncView`s can produce _unpinned_
    /// access to the underlying value, but _pinned_ `SyncView`s only
    /// produce _pinned_ access to the underlying value.
    #[unstable(feature = "exclusive_wrapper", issue = "98407")]
    #[rustc_const_unstable(feature = "exclusive_wrapper", issue = "98407")]
    #[must_use]
    #[inline]
    pub const fn as_pin(self: Pin<&Self>) -> Pin<&T> {
        // SAFETY: `SyncView` can only produce `&T` if itself is unpinned
        // `Pin::map_unchecked` is not const, so we do this conversion manually
        unsafe { Pin::new_unchecked(&self.get_ref().inner) }
    }
}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
#[rustc_const_unstable(feature = "const_convert", issue = "143773")]
impl<T> const From<T> for SyncView<T> {
    #[inline]
    fn from(t: T) -> Self {
        Self::new(t)
    }
}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
#[rustc_const_unstable(feature = "const_trait_impl", issue = "143874")]
impl<F, Args> const FnOnce<Args> for SyncView<F>
where
    F: [const] FnOnce<Args>,
    Args: Tuple,
{
    type Output = F::Output;

    // Consumes `self`, so no `Sync` bound on `F` is needed.
    extern "rust-call" fn call_once(self, args: Args) -> Self::Output {
        self.into_inner().call_once(args)
    }
}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
#[rustc_const_unstable(feature = "const_trait_impl", issue = "143874")]
impl<F, Args> const FnMut<Args> for SyncView<F>
where
    F: [const] FnMut<Args>,
    Args: Tuple,
{
    // Goes through `&mut self`, which `SyncView` always permits.
    extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output {
        self.as_mut().call_mut(args)
    }
}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
#[rustc_const_unstable(feature = "const_trait_impl", issue = "143874")]
impl<F, Args> const Fn<Args> for SyncView<F>
where
    F: Sync + [const] Fn<Args>,
    Args: Tuple,
{
    // `call` only has `&self`, so shared access requires `F: Sync`
    // (see the `as_ref` used here, which is bounded on `T: Sync`).
    extern "rust-call" fn call(&self, args: Args) -> Self::Output {
        self.as_ref().call(args)
    }
}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<F, Args> AsyncFnOnce<Args> for SyncView<F>
where
    F: AsyncFnOnce<Args>,
    Args: Tuple,
{
    type CallOnceFuture = F::CallOnceFuture;
    type Output = F::Output;

    extern "rust-call" fn async_call_once(self, args: Args) -> Self::CallOnceFuture {
        self.into_inner().async_call_once(args)
    }
}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<F, Args> AsyncFnMut<Args> for SyncView<F>
where
    F: AsyncFnMut<Args>,
    Args: Tuple,
{
    type CallRefFuture<'a>
        = F::CallRefFuture<'a>
    where
        F: 'a;

    extern "rust-call" fn async_call_mut(&mut self, args: Args) -> Self::CallRefFuture<'_> {
        self.as_mut().async_call_mut(args)
    }
}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<F, Args> AsyncFn<Args> for SyncView<F>
where
    F: Sync + AsyncFn<Args>,
    Args: Tuple,
{
    // Shared call path, hence the `F: Sync` bound.
    extern "rust-call" fn async_call(&self, args: Args) -> Self::CallRefFuture<'_> {
        self.as_ref().async_call(args)
    }
}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<T> Future for SyncView<T>
where
    T: Future + ?Sized,
{
    type Output = T::Output;

    #[inline]
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        self.as_pin_mut().poll(cx)
    }
}

#[unstable(feature = "coroutine_trait", issue = "43122")] // also #98407
impl<R, G> Coroutine<R> for SyncView<G>
where
    G: Coroutine<R> + ?Sized,
{
    type Yield = G::Yield;
    type Return = G::Return;

    #[inline]
    fn resume(self: Pin<&mut Self>, arg: R) -> CoroutineState<Self::Yield, Self::Return> {
        G::resume(self.as_pin_mut(), arg)
    }
}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
#[rustc_const_unstable(feature = "const_convert", issue = "143773")]
impl<T> const AsRef<T> for SyncView<T>
where
    T: Sync + ?Sized,
{
    /// Gets shared access to the underlying value.
    // Shared access is only sound when `T: Sync`; see the type docs.
    #[inline]
    fn as_ref(&self) -> &T {
        &self.inner
    }
}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
#[rustc_const_unstable(feature = "const_convert", issue = "143773")]
impl<T> const AsMut<T> for SyncView<T>
where
    T: ?Sized,
{
    /// Gets exclusive access to the underlying value.
    #[inline]
    fn as_mut(&mut self) -> &mut T {
        &mut self.inner
    }
}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
#[rustc_const_unstable(feature = "const_clone", issue = "142757")]
impl<T> const Clone for SyncView<T>
where
    T: Sync + [const] Clone,
{
    // Cloning reads the inner value through `&self`, hence the `Sync` bound.
    #[inline]
    fn clone(&self) -> Self {
        Self { inner: self.inner.clone() }
    }
}

#[doc(hidden)]
#[unstable(feature = "trivial_clone", issue = "none")]
#[rustc_const_unstable(feature = "const_clone", issue = "142757")]
unsafe impl<T> const TrivialClone for SyncView<T> where T: Sync + [const] TrivialClone {}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<T> Copy for SyncView<T> where T: Sync + Copy {}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
impl<T, U> const PartialEq<SyncView<U>> for SyncView<T>
where
    T: Sync + [const] PartialEq<U> + ?Sized,
    U: Sync + ?Sized,
{
    // Comparison reads both inner values through `&`, hence the `Sync` bounds.
    #[inline]
    fn eq(&self, other: &SyncView<U>) -> bool {
        self.inner == other.inner
    }
}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<T> StructuralPartialEq for SyncView<T> where T: Sync + StructuralPartialEq + ?Sized {}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
impl<T> const Eq for SyncView<T> where T: Sync + [const] Eq + ?Sized {}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<T> Hash for SyncView<T>
where
    T: Sync + Hash + ?Sized,
{
    #[inline]
    fn hash<H: Hasher>(&self, state: &mut H) {
        Hash::hash(&self.inner, state)
    }
}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
impl<T, U> const PartialOrd<SyncView<U>> for SyncView<T>
where
    T: Sync + [const] PartialOrd<U> + ?Sized,
    U: Sync + ?Sized,
{
    #[inline]
    fn partial_cmp(&self, other: &SyncView<U>) -> Option<Ordering> {
        self.inner.partial_cmp(&other.inner)
    }
}

#[unstable(feature = "exclusive_wrapper", issue = "98407")]
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
impl<T> const Ord for SyncView<T>
where
    T: Sync + [const] Ord + ?Sized,
{
    #[inline]
    fn cmp(&self, other: &Self) -> Ordering {
        self.inner.cmp(&other.inner)
    }
}
+1 -1
View File
@@ -172,7 +172,7 @@
// These come from `core` & `alloc` and only in one flavor: no poisoning.
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
pub use core::sync::Exclusive;
pub use core::sync::SyncView;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::sync::atomic;
+9 -1
View File
@@ -1838,7 +1838,15 @@ pub(crate) fn clean_ty<'tcx>(ty: &hir::Ty<'tcx>, cx: &mut DocContext<'tcx>) -> T
BorrowedRef { lifetime, mutability: m.mutbl, type_: Box::new(clean_ty(m.ty, cx)) }
}
TyKind::Slice(ty) => Slice(Box::new(clean_ty(ty, cx))),
TyKind::Pat(ty, pat) => Type::Pat(Box::new(clean_ty(ty, cx)), format!("{pat:?}").into()),
TyKind::Pat(inner_ty, pat) => {
// Local HIR pattern types should print the same way as cross-crate inlined ones,
// so lower to the canonical `rustc_middle::ty::Pattern` representation first.
let pat = match lower_ty(cx.tcx, ty).kind() {
ty::Pat(_, pat) => format!("{pat:?}").into_boxed_str(),
_ => format!("{pat:?}").into(),
};
Type::Pat(Box::new(clean_ty(inner_ty, cx)), pat)
}
TyKind::FieldOf(ty, hir::TyFieldPath { variant, field }) => {
let field_str = if let Some(variant) = variant {
format!("{variant}.{field}")
@@ -0,0 +1,12 @@
#![feature(pattern_types, pattern_type_macro)]
#![crate_name = "pattern_types_implementors"]

use std::pat::pattern_type;

// Regression test: rustdoc must render a pattern type in the implementors
// list as `*const u8 is !null` rather than leaking the internal `TyPat`
// debug representation.
pub trait MyTrait {}

impl MyTrait for pattern_type!(*const u8 is !null) {}

//@ has pattern_types_implementors/trait.MyTrait.html
//@ has - '//*[@id="implementors-list"]/*[@class="impl"]' 'impl MyTrait for *const u8 is !null'
//@ !has - '//*[@id="implementors-list"]/*[@class="impl"]' 'TyPat {'
@@ -4,11 +4,11 @@
fn f() {}
fn g() {
become std::sync::Exclusive::new(f)() //~ error: tail calls can only be performed with function definitions or pointers
become std::sync::SyncView::new(f)() //~ error: tail calls can only be performed with function definitions or pointers
}
fn h() {
become (&mut &std::sync::Exclusive::new(f))() //~ error: tail calls can only be performed with function definitions or pointers
become (&mut &std::sync::SyncView::new(f))() //~ error: tail calls can only be performed with function definitions or pointers
}
fn i() {
@@ -1,18 +1,18 @@
error: tail calls can only be performed with function definitions or pointers
--> $DIR/callee_is_weird.rs:7:12
|
LL | become std::sync::Exclusive::new(f)()
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
LL | become std::sync::SyncView::new(f)()
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
= note: callee has type `Exclusive<fn() {f}>`
= note: callee has type `SyncView<fn() {f}>`
error: tail calls can only be performed with function definitions or pointers
--> $DIR/callee_is_weird.rs:11:12
|
LL | become (&mut &std::sync::Exclusive::new(f))()
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
LL | become (&mut &std::sync::SyncView::new(f))()
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
= note: callee has type `&Exclusive<fn() {f}>`
= note: callee has type `&SyncView<fn() {f}>`
error: tail calls can only be performed with function definitions or pointers
--> $DIR/callee_is_weird.rs:22:12
+1 -1
View File
@@ -387,7 +387,7 @@ LL | fn in_impl_Fn_return_in_return() -> &'static impl Fn() -> impl Debug { pani
where A: std::marker::Tuple, F: Fn<A>, F: ?Sized;
- impl<Args, F, A> Fn<Args> for Box<F, A>
where Args: std::marker::Tuple, F: Fn<Args>, A: Allocator, F: ?Sized;
- impl<F, Args> Fn<Args> for Exclusive<F>
- impl<F, Args> Fn<Args> for SyncView<F>
where F: Sync, F: Fn<Args>, Args: std::marker::Tuple;
error: unconstrained opaque type
@@ -12,7 +12,7 @@ LL | x = unconstrained_map();
where A: std::marker::Tuple, F: Fn<A>, F: ?Sized;
- impl<Args, F, A> Fn<Args> for Box<F, A>
where Args: std::marker::Tuple, F: Fn<Args>, A: Allocator, F: ?Sized;
- impl<F, Args> Fn<Args> for Exclusive<F>
- impl<F, Args> Fn<Args> for SyncView<F>
where F: Sync, F: Fn<Args>, Args: std::marker::Tuple;
note: required by a bound in `unconstrained_map`
--> $DIR/well-formed-in-relate.rs:21:25