diff --git a/library/alloc/src/collections/vec_deque/iter.rs b/library/alloc/src/collections/vec_deque/iter.rs index d3dbd10c863fb..ce2ab65fec49a 100644 --- a/library/alloc/src/collections/vec_deque/iter.rs +++ b/library/alloc/src/collections/vec_deque/iter.rs @@ -144,6 +144,8 @@ impl<'a, T> Iterator for Iter<'a, T> { } #[inline] + #[allow(unused_parens)] + #[core::contracts::requires(idx < self.len())] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item { // Safety: The TrustedRandomAccess contract requires that callers only pass an index // that is in bounds. diff --git a/library/alloc/src/collections/vec_deque/iter_mut.rs b/library/alloc/src/collections/vec_deque/iter_mut.rs index 0c5f06e752b7b..6710af20b8341 100644 --- a/library/alloc/src/collections/vec_deque/iter_mut.rs +++ b/library/alloc/src/collections/vec_deque/iter_mut.rs @@ -208,6 +208,8 @@ impl<'a, T> Iterator for IterMut<'a, T> { } #[inline] + #[allow(unused_parens)] + #[core::contracts::requires(idx < self.len())] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item { // Safety: The TrustedRandomAccess contract requires that callers only pass an index // that is in bounds. 
diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs index fc3266b74793c..898ff067b2d16 100644 --- a/library/alloc/src/lib.rs +++ b/library/alloc/src/lib.rs @@ -104,6 +104,7 @@ #![feature(const_default)] #![feature(const_eval_select)] #![feature(const_heap)] +#![feature(contracts)] #![feature(core_intrinsics)] #![feature(deprecated_suggestion)] #![feature(deref_pure_trait)] diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs index 37df928228d9c..5bdfeaa5bee1b 100644 --- a/library/alloc/src/vec/into_iter.rs +++ b/library/alloc/src/vec/into_iter.rs @@ -354,6 +354,8 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> { R::from_output(accum) } + #[allow(unused_parens)] + #[core::contracts::requires(i < self.len())] unsafe fn __iterator_get_unchecked(&mut self, i: usize) -> Self::Item where Self: TrustedRandomAccessNoCoerce, diff --git a/library/core/src/alloc/layout.rs b/library/core/src/alloc/layout.rs index cd5fd77f86597..f987edad32ed1 100644 --- a/library/core/src/alloc/layout.rs +++ b/library/core/src/alloc/layout.rs @@ -66,6 +66,14 @@ impl Layout { #[stable(feature = "alloc_layout", since = "1.28.0")] #[rustc_const_stable(feature = "const_alloc_layout_size_align", since = "1.50.0")] #[inline] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::ensures( + move |result: &Result<Self, LayoutError>| + result.is_err() || ( + align.is_power_of_two() && + size <= isize::MAX as usize - (align - 1) && + result.as_ref().unwrap().size() == size && + result.as_ref().unwrap().align() == align))] pub const fn from_size_align(size: usize, align: usize) -> Result<Self, LayoutError> { if Layout::is_size_align_valid(size, align) { // SAFETY: Layout::is_size_align_valid checks the preconditions for this call.
@@ -127,6 +135,11 @@ impl Layout { #[must_use] #[inline] #[track_caller] + #[rustc_allow_const_fn_unstable(contracts)] + #[allow(unused_parens)] + #[core::contracts::requires(Layout::from_size_align(size, align).is_ok())] + #[core::contracts::ensures( + move |result: &Self| result.size() == size && result.align() == align)] pub const unsafe fn from_size_align_unchecked(size: usize, align: usize) -> Self { assert_unsafe_precondition!( check_library_ub, @@ -167,6 +180,10 @@ impl Layout { #[rustc_const_stable(feature = "alloc_layout_const_new", since = "1.42.0")] #[must_use] #[inline] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::ensures( + |result: &Self| + result.size() == mem::size_of::<T>() && result.align() == mem::align_of::<T>())] pub const fn new<T>() -> Self { let (size, align) = size_align::<T>(); // SAFETY: if the type is instantiated, rustc already ensures that its @@ -182,6 +199,11 @@ impl Layout { #[rustc_const_stable(feature = "const_alloc_layout", since = "1.85.0")] #[must_use] #[inline] + #[rustc_allow_const_fn_unstable(contracts)] + #[allow(unused_parens)] + #[core::contracts::requires(mem::align_of_val(t).is_power_of_two())] + // FIXME: requires `&self` to be `'static` + // #[core::contracts::ensures(move |result: &Self| result.align() == mem::align_of_val(t))] pub const fn for_value<T: ?Sized>(t: &T) -> Self { let (size, align) = (size_of_val(t), align_of_val(t)); // SAFETY: see rationale in `new` for why this is using the unsafe variant @@ -217,6 +239,8 @@ impl Layout { /// [extern type]: ../../unstable-book/language-features/extern-types.html #[unstable(feature = "layout_for_ptr", issue = "69835")] #[must_use] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::ensures(|result: &Self| result.align().is_power_of_two())] pub const unsafe fn for_value_raw<T: ?Sized>(t: *const T) -> Self { // SAFETY: we pass along the prerequisites of these functions to the caller let (size, align) = unsafe { (mem::size_of_val_raw(t), mem::align_of_val_raw(t)) };
@@ -233,6 +257,8 @@ impl Layout { #[unstable(feature = "alloc_layout_extra", issue = "55724")] #[must_use] #[inline] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::ensures(|result: &NonNull<u8>| result.is_aligned())] pub const fn dangling(&self) -> NonNull<u8> { NonNull::without_provenance(self.align.as_nonzero()) } @@ -254,6 +280,12 @@ impl Layout { #[stable(feature = "alloc_layout_manipulation", since = "1.44.0")] #[rustc_const_stable(feature = "const_alloc_layout", since = "1.85.0")] #[inline] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::ensures( + move |result: &Result<Self, LayoutError>| + result.is_err() || ( + result.as_ref().unwrap().align() >= align && + result.as_ref().unwrap().align().is_power_of_two()))] pub const fn align_to(&self, align: usize) -> Result<Self, LayoutError> { if let Some(align) = Alignment::new(align) { Layout::from_size_alignment(self.size, Alignment::max(self.align, align)) @@ -282,6 +314,8 @@ impl Layout { #[must_use = "this returns the padding needed, \ without modifying the `Layout`"] #[inline] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::ensures(move |result| *result <= align)] pub const fn padding_needed_for(&self, align: usize) -> usize { // FIXME: Can we just change the type on this to `Alignment`? let Some(align) = Alignment::new(align) else { return usize::MAX }; @@ -331,6 +365,14 @@ impl Layout { #[must_use = "this returns a new `Layout`, \ without modifying the original"] #[inline] + // FIXME: requires `&self` to be `'static` + // #[rustc_allow_const_fn_unstable(contracts)] + // #[core::contracts::ensures( + // move |result: &Layout| + // result.size() >= self.size() && + // result.align() == self.align() && + // result.size() % result.align() == 0 && + // self.size() + self.padding_needed_for(self.align()) == result.size())] pub const fn pad_to_align(&self) -> Layout { // This cannot overflow.
Quoting from the invariant of Layout: // > `size`, when rounded up to the nearest multiple of `align`, @@ -371,6 +413,12 @@ impl Layout { /// ``` #[unstable(feature = "alloc_layout_extra", issue = "55724")] #[inline] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::ensures( + move |result: &Result<(Self, usize), LayoutError>| + result.is_err() || ( + (n == 0 || result.as_ref().unwrap().0.size() % n == 0) && + result.as_ref().unwrap().0.size() == n * result.as_ref().unwrap().1))] pub const fn repeat(&self, n: usize) -> Result<(Self, usize), LayoutError> { let padded = self.pad_to_align(); if let Ok(repeated) = padded.repeat_packed(n) { @@ -428,6 +476,15 @@ impl Layout { #[stable(feature = "alloc_layout_manipulation", since = "1.44.0")] #[rustc_const_stable(feature = "const_alloc_layout", since = "1.85.0")] #[inline] + // FIXME: requires `&self` to be `'static` + // #[rustc_allow_const_fn_unstable(contracts)] + // #[core::contracts::ensures( + // move |result: &Result<(Self, usize), LayoutError>| + // result.is_err() || ( + // result.as_ref().unwrap().0.align() == cmp::max(self.align(), next.align()) && + // result.as_ref().unwrap().0.size() >= self.size() + next.size() && + // result.as_ref().unwrap().1 >= self.size() && + // result.as_ref().unwrap().1 <= result.as_ref().unwrap().0.size()))] pub const fn extend(&self, next: Self) -> Result<(Self, usize), LayoutError> { let new_align = Alignment::max(self.align, next.align); let offset = self.size_rounded_up_to_custom_align(next.align); @@ -459,6 +516,13 @@ impl Layout { /// On arithmetic overflow, returns `LayoutError`. 
#[unstable(feature = "alloc_layout_extra", issue = "55724")] #[inline] + // FIXME: requires `&self` to be `'static` + // #[rustc_allow_const_fn_unstable(contracts)] + // #[core::contracts::ensures( + // move |result: &Result<Self, LayoutError>| + // result.is_err() || ( + // result.as_ref().unwrap().size() == n * self.size() && + // result.as_ref().unwrap().align() == self.align()))] pub const fn repeat_packed(&self, n: usize) -> Result<Self, LayoutError> { if let Some(size) = self.size.checked_mul(n) { // The safe constructor is called here to enforce the isize size limit. @@ -476,6 +540,13 @@ impl Layout { /// On arithmetic overflow, returns `LayoutError`. #[unstable(feature = "alloc_layout_extra", issue = "55724")] #[inline] + // FIXME: requires `&self` to be `'static` + // #[rustc_allow_const_fn_unstable(contracts)] + // #[core::contracts::ensures( + // move |result: &Result<Self, LayoutError>| + // result.is_err() || ( + // result.as_ref().unwrap().size() == self.size() + next.size() && + // result.as_ref().unwrap().align() == self.align()))] pub const fn extend_packed(&self, next: Self) -> Result<Self, LayoutError> { // SAFETY: each `size` is at most `isize::MAX == usize::MAX/2`, so the // sum is at most `usize::MAX/2*2 == usize::MAX - 1`, and cannot overflow. @@ -491,6 +562,12 @@ impl Layout { #[stable(feature = "alloc_layout_manipulation", since = "1.44.0")] #[rustc_const_stable(feature = "const_alloc_layout", since = "1.85.0")] #[inline] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::ensures( + move |result: &Result<Self, LayoutError>| + result.is_err() || ( + result.as_ref().unwrap().size() == n * mem::size_of::<T>() && + result.as_ref().unwrap().align() == mem::align_of::<T>()))] pub const fn array<T>(n: usize) -> Result<Self, LayoutError> { // Reduce the amount of code we need to monomorphize per `T`.
return inner(T::LAYOUT, n); diff --git a/library/core/src/array/iter.rs b/library/core/src/array/iter.rs index fdae5c08f1e8e..a4909695514d4 100644 --- a/library/core/src/array/iter.rs +++ b/library/core/src/array/iter.rs @@ -138,6 +138,9 @@ impl<T, const N: usize> IntoIter<T, N> { /// ``` #[unstable(feature = "array_into_iter_constructors", issue = "91583")] #[inline] + #[rustc_allow_const_fn_unstable(contracts)] + #[allow(unused_parens)] + #[core::contracts::requires(initialized.start <= initialized.end && initialized.end <= N)] pub const unsafe fn new_unchecked( buffer: [MaybeUninit<T>; N], initialized: Range<usize>, @@ -279,6 +282,8 @@ impl<T, const N: usize> Iterator for IntoIter<T, N> { } #[inline] + #[allow(unused_parens)] + #[core::contracts::requires(idx < self.len())] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item { // SAFETY: The caller must provide an idx that is in bound of the remainder. let elem_ref = unsafe { self.as_mut_slice().get_unchecked_mut(idx) }; diff --git a/library/core/src/ascii/ascii_char.rs b/library/core/src/ascii/ascii_char.rs index d77fafed2039b..38ff297ed81ac 100644 --- a/library/core/src/ascii/ascii_char.rs +++ b/library/core/src/ascii/ascii_char.rs @@ -458,6 +458,10 @@ impl AsciiChar { /// or returns `None` if it's too large. #[unstable(feature = "ascii_char", issue = "110998")] #[inline] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::ensures( + move |result: &Option<AsciiChar>| + (b <= 127) == (result.is_some() && result.unwrap() as u8 == b))] pub const fn from_u8(b: u8) -> Option<Self> { if b <= 127 { // SAFETY: Just checked that `b` is in-range @@ -475,6 +479,10 @@ impl AsciiChar { /// `b` must be in `0..=127`, or else this is UB.
#[unstable(feature = "ascii_char", issue = "110998")] #[inline] + #[rustc_allow_const_fn_unstable(contracts)] + #[allow(unused_parens)] + #[core::contracts::requires(b <= 127)] + #[core::contracts::ensures(move |result: &Self| *result as u8 == b)] pub const unsafe fn from_u8_unchecked(b: u8) -> Self { // SAFETY: Our safety precondition is that `b` is in-range. unsafe { transmute(b) } @@ -513,6 +521,12 @@ impl AsciiChar { #[unstable(feature = "ascii_char", issue = "110998")] #[inline] #[track_caller] + // Only `d < 64` is required for safety as described above, but we use `d < 10` as in the + // `assert_unsafe_precondition` inside. See https://github.com/rust-lang/rust/pull/129374 for + // some context about the discrepancy. + #[rustc_allow_const_fn_unstable(contracts)] + #[allow(unused_parens)] + #[core::contracts::requires(d < 10)] pub const unsafe fn digit_unchecked(d: u8) -> Self { assert_unsafe_precondition!( check_library_ub, @@ -532,6 +546,8 @@ impl AsciiChar { /// Gets this ASCII character as a byte. #[unstable(feature = "ascii_char", issue = "110998")] #[inline] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::ensures(|result: &u8| *result <= 127)] pub const fn to_u8(self) -> u8 { self as u8 } diff --git a/library/core/src/char/convert.rs b/library/core/src/char/convert.rs index 6380f42d320c6..965c4ce7881c8 100644 --- a/library/core/src/char/convert.rs +++ b/library/core/src/char/convert.rs @@ -23,6 +23,10 @@ pub(super) const fn from_u32(i: u32) -> Option { #[must_use] #[allow(unnecessary_transmutes)] #[track_caller] +#[rustc_allow_const_fn_unstable(contracts)] +#[allow(unused_parens)] +#[core::contracts::requires(char_try_from_u32(i).is_ok())] +#[core::contracts::ensures(move |result: &char| *result as u32 == i)] pub(super) const unsafe fn from_u32_unchecked(i: u32) -> char { // SAFETY: the caller must guarantee that `i` is a valid char value. 
unsafe { diff --git a/library/core/src/char/mod.rs b/library/core/src/char/mod.rs index 82a3f6f916be3..24e24eda61cf3 100644 --- a/library/core/src/char/mod.rs +++ b/library/core/src/char/mod.rs @@ -138,6 +138,9 @@ pub const fn from_u32(i: u32) -> Option { #[rustc_const_stable(feature = "const_char_from_u32_unchecked", since = "1.81.0")] #[must_use] #[inline] +#[rustc_allow_const_fn_unstable(contracts)] +#[allow(unused_parens)] +#[core::contracts::requires(i <= 0x10FFFF && (i < 0xD800 || i > 0xDFFF))] pub const unsafe fn from_u32_unchecked(i: u32) -> char { // SAFETY: the safety contract must be upheld by the caller. unsafe { self::convert::from_u32_unchecked(i) } @@ -399,6 +402,7 @@ macro_rules! casemappingiter_impls { self.0.advance_by(n) } + #[core::contracts::requires(idx < self.0.len())] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item { // SAFETY: just forwarding requirements to caller unsafe { self.0.__iterator_get_unchecked(idx) } @@ -533,6 +537,8 @@ impl Iterator for CaseMappingIter { self.0.advance_by(n) } + #[allow(unused_parens)] + #[core::contracts::requires(idx < self.len())] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item { // SAFETY: just forwarding requirements to caller unsafe { self.0.__iterator_get_unchecked(idx) } diff --git a/library/core/src/ffi/c_str.rs b/library/core/src/ffi/c_str.rs index 09d9b160700ca..e2bbb539967d3 100644 --- a/library/core/src/ffi/c_str.rs +++ b/library/core/src/ffi/c_str.rs @@ -250,6 +250,14 @@ impl CStr { #[must_use] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_const_stable(feature = "const_cstr_from_ptr", since = "1.81.0")] + #[rustc_allow_const_fn_unstable(contracts)] + #[allow(unused_parens)] + #[core::contracts::requires(!ptr.is_null())] + #[core::contracts::ensures( + |result: &&CStr| + !result.inner.is_empty() && + result.inner[result.inner.len() - 1] == 0 && + !result.inner[..result.inner.len() - 1].contains(&0))] pub const unsafe fn from_ptr<'a>(ptr: 
*const c_char) -> &'a CStr { // SAFETY: The caller has provided a pointer that points to a valid C // string with a NUL terminator less than `isize::MAX` from `ptr`. @@ -385,6 +393,15 @@ impl CStr { #[stable(feature = "cstr_from_bytes", since = "1.10.0")] #[rustc_const_stable(feature = "const_cstr_unchecked", since = "1.59.0")] #[rustc_allow_const_fn_unstable(const_eval_select)] + #[rustc_allow_const_fn_unstable(contracts)] + #[allow(unused_parens)] + #[core::contracts::requires( + !bytes.is_empty() && bytes[bytes.len() - 1] == 0 && !bytes[..bytes.len()-1].contains(&0))] + #[core::contracts::ensures( + |result: &&CStr| + !result.inner.is_empty() && + result.inner[result.inner.len() - 1] == 0 && + !result.inner[..result.inner.len() - 1].contains(&0))] pub const unsafe fn from_bytes_with_nul_unchecked(bytes: &[u8]) -> &CStr { const_eval_select!( @capture { bytes: &[u8] } -> &CStr: @@ -723,6 +740,12 @@ impl const AsRef for CStr { #[inline] #[unstable(feature = "cstr_internals", issue = "none")] #[rustc_allow_const_fn_unstable(const_eval_select)] +#[rustc_allow_const_fn_unstable(contracts)] +#[core::contracts::ensures( + move |&result| + result < isize::MAX as usize && + // SAFETY: result is within isize::MAX + unsafe { *ptr.add(result) } == 0)] const unsafe fn strlen(ptr: *const c_char) -> usize { const_eval_select!( @capture { s: *const c_char = ptr } -> usize: diff --git a/library/core/src/intrinsics/fallback.rs b/library/core/src/intrinsics/fallback.rs index 932537f2581f8..aa1e836f08bef 100644 --- a/library/core/src/intrinsics/fallback.rs +++ b/library/core/src/intrinsics/fallback.rs @@ -130,6 +130,7 @@ macro_rules! impl_disjoint_bitor { impl const DisjointBitOr for $t { #[cfg_attr(miri, track_caller)] #[inline] + #[core::contracts::requires((self & other) == zero!($t))] unsafe fn disjoint_bitor(self, other: Self) -> Self { // Note that the assume here is required for UB detection in Miri! 
diff --git a/library/core/src/intrinsics/mod.rs b/library/core/src/intrinsics/mod.rs index cef700be9ea1f..7be7d6ea8aa5a 100644 --- a/library/core/src/intrinsics/mod.rs +++ b/library/core/src/intrinsics/mod.rs @@ -2559,6 +2559,9 @@ pub const fn is_val_statically_known(_arg: T) -> bool { #[inline] #[rustc_intrinsic] #[rustc_intrinsic_const_stable_indirect] +#[allow(unused_parens)] +#[rustc_allow_const_fn_unstable(contracts)] +#[core::contracts::requires(x.addr() != y.addr() || core::mem::size_of::() == 0)] pub const unsafe fn typed_swap_nonoverlapping(x: *mut T, y: *mut T) { // SAFETY: The caller provided single non-overlapping items behind // pointers, so swapping them with `count: 1` is fine. diff --git a/library/core/src/iter/adapters/cloned.rs b/library/core/src/iter/adapters/cloned.rs index aea6d64281aec..10705365fbb03 100644 --- a/library/core/src/iter/adapters/cloned.rs +++ b/library/core/src/iter/adapters/cloned.rs @@ -61,6 +61,8 @@ where self.it.map(T::clone).fold(init, f) } + #[allow(unused_parens)] + #[core::contracts::requires(idx < self.it.size_hint().0)] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> T where Self: TrustedRandomAccessNoCoerce, diff --git a/library/core/src/iter/adapters/copied.rs b/library/core/src/iter/adapters/copied.rs index 23e4e25ab5388..9d9a633f99f74 100644 --- a/library/core/src/iter/adapters/copied.rs +++ b/library/core/src/iter/adapters/copied.rs @@ -92,6 +92,8 @@ where self.it.advance_by(n) } + #[allow(unused_parens)] + #[core::contracts::requires(idx < self.it.size_hint().0)] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> T where Self: TrustedRandomAccessNoCoerce, diff --git a/library/core/src/iter/adapters/enumerate.rs b/library/core/src/iter/adapters/enumerate.rs index f7b9f0b7a5e9d..a698748978f86 100644 --- a/library/core/src/iter/adapters/enumerate.rs +++ b/library/core/src/iter/adapters/enumerate.rs @@ -160,6 +160,8 @@ where #[rustc_inherit_overflow_checks] #[inline] + #[allow(unused_parens)] + 
#[core::contracts::requires(idx < self.iter.size_hint().0)] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> ::Item where Self: TrustedRandomAccessNoCoerce, diff --git a/library/core/src/iter/adapters/fuse.rs b/library/core/src/iter/adapters/fuse.rs index 0072a95e8dfe0..b75da9e9a2659 100644 --- a/library/core/src/iter/adapters/fuse.rs +++ b/library/core/src/iter/adapters/fuse.rs @@ -109,6 +109,9 @@ where } #[inline] + #[allow(unused_parens)] + #[core::contracts::requires( + self.iter.is_some() && idx < self.iter.as_ref().unwrap().size_hint().0)] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item where Self: TrustedRandomAccessNoCoerce, diff --git a/library/core/src/iter/adapters/map.rs b/library/core/src/iter/adapters/map.rs index 007c2d5acc2d0..f82d8c306d345 100644 --- a/library/core/src/iter/adapters/map.rs +++ b/library/core/src/iter/adapters/map.rs @@ -129,6 +129,8 @@ where } #[inline] + #[allow(unused_parens)] + #[core::contracts::requires(idx < self.iter.size_hint().0)] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> B where Self: TrustedRandomAccessNoCoerce, diff --git a/library/core/src/iter/adapters/skip.rs b/library/core/src/iter/adapters/skip.rs index 55c4a7f14fbd6..b2ac2790adf75 100644 --- a/library/core/src/iter/adapters/skip.rs +++ b/library/core/src/iter/adapters/skip.rs @@ -158,6 +158,8 @@ where } #[doc(hidden)] + #[allow(unused_parens)] + #[core::contracts::requires(idx + self.n < self.iter.size_hint().0)] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item where Self: TrustedRandomAccessNoCoerce, diff --git a/library/core/src/iter/adapters/zip.rs b/library/core/src/iter/adapters/zip.rs index c5e199c30821d..a20ff5719c5fc 100644 --- a/library/core/src/iter/adapters/zip.rs +++ b/library/core/src/iter/adapters/zip.rs @@ -104,6 +104,8 @@ where } #[inline] + #[allow(unused_parens)] + #[core::contracts::requires(idx < self.size_hint().0)] unsafe fn __iterator_get_unchecked(&mut self, idx: 
usize) -> Self::Item where Self: TrustedRandomAccessNoCoerce, diff --git a/library/core/src/iter/range.rs b/library/core/src/iter/range.rs index 9e43d5688cecc..d1535d663638a 100644 --- a/library/core/src/iter/range.rs +++ b/library/core/src/iter/range.rs @@ -184,12 +184,14 @@ pub trait Step: Clone + PartialOrd + Sized { // than the signed::MAX value. Therefore `as` casting to the signed type would be incorrect. macro_rules! step_signed_methods { ($unsigned: ty) => { + #[core::contracts::requires(start.checked_add_unsigned(n as $unsigned).is_some())] #[inline] unsafe fn forward_unchecked(start: Self, n: usize) -> Self { // SAFETY: the caller has to guarantee that `start + n` doesn't overflow. unsafe { start.checked_add_unsigned(n as $unsigned).unwrap_unchecked() } } + #[core::contracts::requires(start.checked_sub_unsigned(n as $unsigned).is_some())] #[inline] unsafe fn backward_unchecked(start: Self, n: usize) -> Self { // SAFETY: the caller has to guarantee that `start - n` doesn't overflow. @@ -200,12 +202,14 @@ macro_rules! step_signed_methods { macro_rules! step_unsigned_methods { () => { + #[core::contracts::requires(start.checked_add(n as Self).is_some())] #[inline] unsafe fn forward_unchecked(start: Self, n: usize) -> Self { // SAFETY: the caller has to guarantee that `start + n` doesn't overflow. unsafe { start.unchecked_add(n as Self) } } + #[core::contracts::requires(start >= (n as Self))] #[inline] unsafe fn backward_unchecked(start: Self, n: usize) -> Self { // SAFETY: the caller has to guarantee that `start - n` doesn't overflow. 
@@ -495,6 +499,12 @@ impl Step for char { Some(unsafe { char::from_u32_unchecked(res) }) } + #[allow(unused_parens)] + #[core::contracts::requires( + (start as u32).checked_add(count as u32).is_some_and(|dist| + (start as u32) >= 0xD800 || + dist < 0xD800 || + dist.checked_add(0x800).is_some()))] #[inline] unsafe fn forward_unchecked(start: char, count: usize) -> char { let start = start as u32; @@ -511,6 +521,12 @@ impl Step for char { unsafe { char::from_u32_unchecked(res) } } + #[allow(unused_parens)] + #[core::contracts::requires( + (start as u32).checked_sub(count as u32).is_some_and(|dist| + (start as u32) < 0xE000 || + dist >= 0xE000 || + dist.checked_sub(0x800).is_some()))] #[inline] unsafe fn backward_unchecked(start: char, count: usize) -> char { let start = start as u32; @@ -549,6 +565,8 @@ impl Step for AsciiChar { Some(unsafe { AsciiChar::from_u8_unchecked(end) }) } + #[allow(unused_parens)] + #[core::contracts::requires(count < 256 && start.to_u8().checked_add(count as u8).is_some())] #[inline] unsafe fn forward_unchecked(start: AsciiChar, count: usize) -> AsciiChar { // SAFETY: Caller asserts that result is a valid ASCII character, @@ -559,6 +577,8 @@ impl Step for AsciiChar { unsafe { AsciiChar::from_u8_unchecked(end) } } + #[allow(unused_parens)] + #[core::contracts::requires(count < 256 && start.to_u8().checked_sub(count as u8).is_some())] #[inline] unsafe fn backward_unchecked(start: AsciiChar, count: usize) -> AsciiChar { // SAFETY: Caller asserts that result is a valid ASCII character, @@ -587,6 +607,8 @@ impl Step for Ipv4Addr { u32::backward_checked(start.to_bits(), count).map(Ipv4Addr::from_bits) } + #[allow(unused_parens)] + #[core::contracts::requires(start.to_bits().checked_add(count as u32).is_some())] #[inline] unsafe fn forward_unchecked(start: Ipv4Addr, count: usize) -> Ipv4Addr { // SAFETY: Since u32 and Ipv4Addr are losslessly convertible, @@ -594,6 +616,8 @@ impl Step for Ipv4Addr { Ipv4Addr::from_bits(unsafe { 
u32::forward_unchecked(start.to_bits(), count) }) } + #[allow(unused_parens)] + #[core::contracts::requires(start.to_bits().checked_sub(count as u32).is_some())] #[inline] unsafe fn backward_unchecked(start: Ipv4Addr, count: usize) -> Ipv4Addr { // SAFETY: Since u32 and Ipv4Addr are losslessly convertible, @@ -619,6 +643,8 @@ impl Step for Ipv6Addr { u128::backward_checked(start.to_bits(), count).map(Ipv6Addr::from_bits) } + #[allow(unused_parens)] + #[core::contracts::requires(start.to_bits().checked_add(count as u128).is_some())] #[inline] unsafe fn forward_unchecked(start: Ipv6Addr, count: usize) -> Ipv6Addr { // SAFETY: Since u128 and Ipv6Addr are losslessly convertible, @@ -626,6 +652,8 @@ impl Step for Ipv6Addr { Ipv6Addr::from_bits(unsafe { u128::forward_unchecked(start.to_bits(), count) }) } + #[allow(unused_parens)] + #[core::contracts::requires(start.to_bits().checked_sub(count as u128).is_some())] #[inline] unsafe fn backward_unchecked(start: Ipv6Addr, count: usize) -> Ipv6Addr { // SAFETY: Since u128 and Ipv6Addr are losslessly convertible, @@ -905,6 +933,8 @@ impl Iterator for ops::Range { } #[inline] + #[allow(unused_parens)] + #[core::contracts::requires(idx < self.size_hint().0)] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item where Self: TrustedRandomAccessNoCoerce, diff --git a/library/core/src/iter/traits/iterator.rs b/library/core/src/iter/traits/iterator.rs index 695f8d1e195e9..b4c908e40fda8 100644 --- a/library/core/src/iter/traits/iterator.rs +++ b/library/core/src/iter/traits/iterator.rs @@ -1710,6 +1710,8 @@ pub trait Iterator { /// ``` #[inline] #[unstable(feature = "iter_map_windows", reason = "recently added", issue = "87155")] + #[allow(unused_parens)] + #[core::contracts::requires(N > 0)] fn map_windows(self, f: F) -> MapWindows where Self: Sized, diff --git a/library/core/src/lib.rs b/library/core/src/lib.rs index 54adf97f10020..4dd36fd86d62a 100644 --- a/library/core/src/lib.rs +++ b/library/core/src/lib.rs @@ 
-106,6 +106,7 @@ #![feature(const_cmp)] #![feature(const_destruct)] #![feature(const_eval_select)] +#![feature(contracts)] #![feature(core_intrinsics)] #![feature(coverage_attribute)] #![feature(disjoint_bitor)] diff --git a/library/core/src/num/f128.rs b/library/core/src/num/f128.rs index 4fe4735e304c9..bf4aed7af345d 100644 --- a/library/core/src/num/f128.rs +++ b/library/core/src/num/f128.rs @@ -874,6 +874,8 @@ impl f128 { #[inline] #[unstable(feature = "f128", issue = "116909")] #[must_use = "this returns the result of the operation, without modifying the original"] + #[allow(unused_parens)] + #[core::contracts::requires(self.is_finite())] pub unsafe fn to_int_unchecked(self) -> Int where Self: FloatToInt, diff --git a/library/core/src/num/f16.rs b/library/core/src/num/f16.rs index 0bea6bc8801d8..c28da8173db84 100644 --- a/library/core/src/num/f16.rs +++ b/library/core/src/num/f16.rs @@ -861,6 +861,8 @@ impl f16 { #[inline] #[unstable(feature = "f16", issue = "116909")] #[must_use = "this returns the result of the operation, without modifying the original"] + #[allow(unused_parens)] + #[core::contracts::requires(self.is_finite())] pub unsafe fn to_int_unchecked(self) -> Int where Self: FloatToInt, diff --git a/library/core/src/num/f32.rs b/library/core/src/num/f32.rs index e380cc698f574..39d04cc6f448e 100644 --- a/library/core/src/num/f32.rs +++ b/library/core/src/num/f32.rs @@ -1065,6 +1065,8 @@ impl f32 { without modifying the original"] #[stable(feature = "float_approx_unchecked_to", since = "1.44.0")] #[inline] + #[allow(unused_parens)] + #[core::contracts::requires(self.is_finite())] pub unsafe fn to_int_unchecked(self) -> Int where Self: FloatToInt, diff --git a/library/core/src/num/f64.rs b/library/core/src/num/f64.rs index ff7449fd996ce..93b3353fde907 100644 --- a/library/core/src/num/f64.rs +++ b/library/core/src/num/f64.rs @@ -1064,6 +1064,8 @@ impl f64 { without modifying the original"] #[stable(feature = "float_approx_unchecked_to", since = 
"1.44.0")] #[inline] + #[allow(unused_parens)] + #[core::contracts::requires(self.is_finite())] pub unsafe fn to_int_unchecked(self) -> Int where Self: FloatToInt, diff --git a/library/core/src/num/int_macros.rs b/library/core/src/num/int_macros.rs index c3460a6409069..24019eef2dd14 100644 --- a/library/core/src/num/int_macros.rs +++ b/library/core/src/num/int_macros.rs @@ -554,6 +554,8 @@ macro_rules! int_impl { without modifying the original"] #[inline(always)] #[track_caller] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::requires(!self.overflowing_add(rhs).1)] pub const unsafe fn unchecked_add(self, rhs: Self) -> Self { assert_unsafe_precondition!( check_language_ub, @@ -694,6 +696,8 @@ macro_rules! int_impl { without modifying the original"] #[inline(always)] #[track_caller] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::requires(!self.overflowing_sub(rhs).1)] pub const unsafe fn unchecked_sub(self, rhs: Self) -> Self { assert_unsafe_precondition!( check_language_ub, @@ -834,6 +838,8 @@ macro_rules! int_impl { without modifying the original"] #[inline(always)] #[track_caller] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::requires(!self.overflowing_mul(rhs).1)] pub const unsafe fn unchecked_mul(self, rhs: Self) -> Self { assert_unsafe_precondition!( check_language_ub, @@ -1252,6 +1258,9 @@ macro_rules! int_impl { without modifying the original"] #[inline(always)] #[track_caller] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::requires(self != <$SelfT>::MIN)] + #[core::contracts::ensures(move |result| *result == -self)] pub const unsafe fn unchecked_neg(self) -> Self { assert_unsafe_precondition!( check_language_ub, @@ -1372,6 +1381,8 @@ macro_rules! 
int_impl { without modifying the original"] #[inline(always)] #[track_caller] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::requires(rhs < <$ActualT>::BITS)] pub const unsafe fn unchecked_shl(self, rhs: u32) -> Self { assert_unsafe_precondition!( check_language_ub, @@ -1547,6 +1558,8 @@ macro_rules! int_impl { without modifying the original"] #[inline(always)] #[track_caller] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::requires(rhs < <$ActualT>::BITS)] pub const unsafe fn unchecked_shr(self, rhs: u32) -> Self { assert_unsafe_precondition!( check_language_ub, @@ -2278,6 +2291,8 @@ macro_rules! int_impl { #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline(always)] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::ensures(move |result| *result == self << (rhs & (Self::BITS - 1)))] pub const fn wrapping_shl(self, rhs: u32) -> Self { // SAFETY: the masking by the bitsize of the type ensures that we do not shift // out of bounds @@ -2305,6 +2320,8 @@ macro_rules! 
int_impl { #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline(always)] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::ensures(move |result| *result == self >> (rhs & (Self::BITS - 1)))] pub const fn wrapping_shr(self, rhs: u32) -> Self { // SAFETY: the masking by the bitsize of the type ensures that we do not shift // out of bounds diff --git a/library/core/src/num/mod.rs b/library/core/src/num/mod.rs index c75ee11d15efe..157a72eeadb29 100644 --- a/library/core/src/num/mod.rs +++ b/library/core/src/num/mod.rs @@ -505,6 +505,9 @@ impl u8 { #[must_use] #[unstable(feature = "ascii_char", issue = "110998")] #[inline] + #[allow(unused_parens)] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::requires(self.is_ascii())] pub const unsafe fn as_ascii_unchecked(&self) -> ascii::Char { assert_unsafe_precondition!( check_library_ub, diff --git a/library/core/src/num/niche_types.rs b/library/core/src/num/niche_types.rs index 610d9d8cf92e0..d0a222db0f0cb 100644 --- a/library/core/src/num/niche_types.rs +++ b/library/core/src/num/niche_types.rs @@ -33,6 +33,12 @@ macro_rules! define_valid_range_type { impl $name { #[inline] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::ensures( + |result: &Option<$name>| + result.is_none() || ( + (result.unwrap().as_inner() as $uint) >= ($low as $uint) && + (result.unwrap().as_inner() as $uint) <= ($high as $uint)))] pub const fn new(val: $int) -> Option { if (val as $uint) >= ($low as $uint) && (val as $uint) <= ($high as $uint) { // SAFETY: just checked the inclusive range @@ -49,12 +55,19 @@ macro_rules! define_valid_range_type { /// Immediate language UB if `val` is not within the valid range for this /// type, as it violates the validity invariant. 
#[inline] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::requires( + (val as $uint) >= ($low as $uint) && (val as $uint) <= ($high as $uint))] pub const unsafe fn new_unchecked(val: $int) -> Self { // SAFETY: Caller promised that `val` is within the valid range. unsafe { $name(val) } } #[inline] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::ensures( + |result| + (*result as $uint) >= ($low as $uint) && (*result as $uint) <= ($high as $uint))] pub const fn as_inner(self) -> $int { // SAFETY: This is a transparent wrapper, so unwrapping it is sound // (Not using `.0` due to MCP#807.) diff --git a/library/core/src/num/nonzero.rs b/library/core/src/num/nonzero.rs index d9184e3c9c229..8ae4ed09ceb7f 100644 --- a/library/core/src/num/nonzero.rs +++ b/library/core/src/num/nonzero.rs @@ -411,6 +411,14 @@ where #[must_use] #[inline] #[track_caller] + #[allow(unused_parens)] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::requires({ + let size = core::mem::size_of::(); + let ptr = &n as *const T as *const u8; + // SAFETY: to be confirmed + let slice = unsafe { core::slice::from_raw_parts(ptr, size) }; + !slice.iter().all(|&byte| byte == 0) })] pub const unsafe fn new_unchecked(n: T) -> Self { match Self::new(n) { Some(n) => n, @@ -452,6 +460,13 @@ where #[must_use] #[inline] #[track_caller] + #[allow(unused_parens)] + #[core::contracts::requires({ + let size = core::mem::size_of::(); + let ptr = n as *const T as *const u8; + // SAFETY: to be confirmed + let slice = unsafe { core::slice::from_raw_parts(ptr, size) }; + !slice.iter().all(|&byte| byte == 0) })] pub unsafe fn from_mut_unchecked(n: &mut T) -> &mut Self { match Self::from_mut(n) { Some(n) => n, @@ -771,6 +786,8 @@ macro_rules! 
nonzero_integer { #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline(always)] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::ensures(|result: &NonZero| result.get() > 0)] pub const fn count_ones(self) -> NonZero { // SAFETY: // `self` is non-zero, which means it has at least one bit set, which means @@ -1144,6 +1161,11 @@ macro_rules! nonzero_integer { #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::requires(self.get().checked_mul(other.get()).is_some())] + #[core::contracts::ensures( + move |result: &Self| + self.get().checked_mul(other.get()).is_some_and(|product| product == result.get()))] pub const unsafe fn unchecked_mul(self, other: Self) -> Self { // SAFETY: The caller ensures there is no overflow. unsafe { Self::new_unchecked(self.get().unchecked_mul(other.get())) } @@ -1552,6 +1574,11 @@ macro_rules! nonzero_integer_signedness_dependent_methods { #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::requires(self.get().checked_add(other).is_some())] + #[core::contracts::ensures( + move |result: &Self| + self.get().checked_add(other).is_some_and(|sum| sum == result.get()))] pub const unsafe fn unchecked_add(self, other: $Int) -> Self { // SAFETY: The caller ensures there is no overflow. unsafe { Self::new_unchecked(self.get().unchecked_add(other)) } diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs index 752498bfbd815..6f47f056e79fa 100644 --- a/library/core/src/num/uint_macros.rs +++ b/library/core/src/num/uint_macros.rs @@ -702,6 +702,8 @@ macro_rules! 
uint_impl { without modifying the original"] #[inline(always)] #[track_caller] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::requires(!self.overflowing_add(rhs).1)] pub const unsafe fn unchecked_add(self, rhs: Self) -> Self { assert_unsafe_precondition!( check_language_ub, @@ -881,6 +883,8 @@ macro_rules! uint_impl { without modifying the original"] #[inline(always)] #[track_caller] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::requires(!self.overflowing_sub(rhs).1)] pub const unsafe fn unchecked_sub(self, rhs: Self) -> Self { assert_unsafe_precondition!( check_language_ub, @@ -1090,6 +1094,8 @@ macro_rules! uint_impl { without modifying the original"] #[inline(always)] #[track_caller] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::requires(!self.overflowing_mul(rhs).1)] pub const unsafe fn unchecked_mul(self, rhs: Self) -> Self { assert_unsafe_precondition!( check_language_ub, @@ -1449,6 +1455,7 @@ macro_rules! uint_impl { #[unstable(feature = "disjoint_bitor", issue = "135758")] #[rustc_const_unstable(feature = "disjoint_bitor", issue = "135758")] #[inline] + #[core::contracts::requires((self & other) == 0)] pub const unsafe fn unchecked_disjoint_bitor(self, other: Self) -> Self { assert_unsafe_precondition!( check_language_ub, @@ -1780,6 +1787,8 @@ macro_rules! uint_impl { without modifying the original"] #[inline(always)] #[track_caller] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::requires(rhs < <$ActualT>::BITS)] pub const unsafe fn unchecked_shl(self, rhs: u32) -> Self { assert_unsafe_precondition!( check_language_ub, @@ -1952,6 +1961,8 @@ macro_rules! uint_impl { without modifying the original"] #[inline(always)] #[track_caller] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::requires(rhs < <$ActualT>::BITS)] pub const unsafe fn unchecked_shr(self, rhs: u32) -> Self { assert_unsafe_precondition!( check_language_ub, @@ -2534,6 +2545,9 @@ macro_rules! 
uint_impl { #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline(always)] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::ensures( + move |result: &Self| *result == self << (rhs & (Self::BITS - 1)))] pub const fn wrapping_shl(self, rhs: u32) -> Self { // SAFETY: the masking by the bitsize of the type ensures that we do not shift // out of bounds @@ -2564,6 +2578,8 @@ macro_rules! uint_impl { #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline(always)] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::ensures(move |result| *result == self >> (rhs & (Self::BITS - 1)))] pub const fn wrapping_shr(self, rhs: u32) -> Self { // SAFETY: the masking by the bitsize of the type ensures that we do not shift // out of bounds diff --git a/library/core/src/ops/index_range.rs b/library/core/src/ops/index_range.rs index 507fa9460bea6..d69f2a09dbabb 100644 --- a/library/core/src/ops/index_range.rs +++ b/library/core/src/ops/index_range.rs @@ -20,6 +20,9 @@ impl IndexRange { /// - `start <= end` #[inline] #[track_caller] + #[rustc_allow_const_fn_unstable(contracts)] + #[allow(unused_parens)] + #[core::contracts::requires(start <= end)] pub(crate) const unsafe fn new_unchecked(start: usize, end: usize) -> Self { ub_checks::assert_unsafe_precondition!( check_library_ub, @@ -54,6 +57,8 @@ impl IndexRange { /// # Safety /// - Can only be called when `start < end`, aka when `len > 0`. #[inline] + #[allow(unused_parens)] + #[core::contracts::requires(self.start < self.end)] unsafe fn next_unchecked(&mut self) -> usize { debug_assert!(self.start < self.end); @@ -66,6 +71,8 @@ impl IndexRange { /// # Safety /// - Can only be called when `start < end`, aka when `len > 0`. 
#[inline] + #[allow(unused_parens)] + #[core::contracts::requires(self.start < self.end)] unsafe fn next_back_unchecked(&mut self) -> usize { debug_assert!(self.start < self.end); diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs index 451092709443b..3fa4b0d6c801b 100644 --- a/library/core/src/ptr/const_ptr.rs +++ b/library/core/src/ptr/const_ptr.rs @@ -346,6 +346,13 @@ impl *const T { #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")] #[inline(always)] #[track_caller] + #[rustc_allow_const_fn_unstable(contracts)] + #[allow(unused_parens)] + #[core::contracts::requires( + // Precondition 1: the computed offset `count * size_of::()` does not overflow `isize`. + // Precondition 2: adding the computed offset to `self` does not cause overflow. + count.checked_mul(core::mem::size_of::() as isize).is_some_and( + |computed_offset| (self as isize).checked_add(computed_offset).is_some()))] pub const unsafe fn offset(self, count: isize) -> *const T where T: Sized, @@ -609,6 +616,17 @@ impl *const T { #[rustc_const_stable(feature = "const_ptr_offset_from", since = "1.65.0")] #[inline] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + #[rustc_allow_const_fn_unstable(contracts)] + #[allow(unused_parens)] + #[core::contracts::requires( + // Ensures subtracting `origin` from `self` doesn't overflow + (self as isize).checked_sub(origin as isize).is_some() && + // Ensure the distance between `self` and `origin` is aligned to `T` + (self as isize - origin as isize) % (mem::size_of::() as isize) == 0)] + // FIXME: requires `T` to be `'static` + // #[core::contracts::ensures( + // move |result| + // *result == (self as isize - origin as isize) / (mem::size_of::() as isize))] pub const unsafe fn offset_from(self, origin: *const T) -> isize where T: Sized, @@ -826,6 +844,15 @@ impl *const T { #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")] #[inline(always)] #[track_caller] + 
#[rustc_allow_const_fn_unstable(contracts)] + #[allow(unused_parens)] + #[core::contracts::requires( + // Precondition 1: the computed offset `count * size_of::<T>()` does not overflow `isize`. + // Precondition 2: adding the computed offset to `self` does not cause overflow. + count.checked_mul(core::mem::size_of::<T>()).is_some_and( + |computed_offset| + computed_offset <= isize::MAX as usize && + (self as isize).checked_add(computed_offset as isize).is_some()))] pub const unsafe fn add(self, count: usize) -> Self where T: Sized, @@ -932,6 +959,15 @@ impl *const T { #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")] #[inline(always)] #[track_caller] + #[rustc_allow_const_fn_unstable(contracts)] + #[allow(unused_parens)] + #[core::contracts::requires( + // Precondition 1: the computed offset `count * size_of::<T>()` does not overflow `isize`. + // Precondition 2: subtracting the computed offset from `self` does not cause overflow. + count.checked_mul(core::mem::size_of::<T>()).is_some_and( + |computed_offset| + computed_offset <= isize::MAX as usize && + (self as isize).checked_sub(computed_offset as isize).is_some()))] pub const unsafe fn sub(self, count: usize) -> Self where T: Sized, diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs index b29d267654252..9cd28a454188f 100644 --- a/library/core/src/ptr/mod.rs +++ b/library/core/src/ptr/mod.rs @@ -2220,6 +2220,42 @@ pub unsafe fn write_volatile(dst: *mut T, src: T) { /// /// Any questions go to @nagisa.
#[allow(ptr_to_integer_transmute_in_consts)] +#[allow(unused_parens)] +#[core::contracts::requires(a.is_power_of_two())] +// FIXME: requires `T` to be `'static` +// #[core::contracts::ensures(move |result| { +// let stride = mem::size_of::(); +// // ZSTs +// if stride == 0 { +// if p.addr() % a == 0 { +// return *result == 0; +// } else { +// return *result == usize::MAX; +// } +// } +// +// // In this case, the pointer cannot be aligned +// if (a % stride == 0) && (p.addr() % stride != 0) { +// return *result == usize::MAX; +// } +// +// // Checking if the answer should indeed be usize::MAX when a % stride != 0 +// // requires computing gcd(a, stride), which could be done using cttz as the implementation +// // does. +// if a % stride != 0 && *result == usize::MAX { +// return true; +// } +// +// // If we reach this case, either: +// // - a % stride == 0 and p.addr() % stride == 0, so it is definitely possible to align the +// // pointer +// // - a % stride != 0 and result != usize::MAX, so align_offset is claiming that it's possible +// // to align the pointer +// // Check that applying the returned result does indeed produce an aligned address +// let product = usize::wrapping_mul(*result, stride); +// let new_addr = usize::wrapping_add(product, p.addr()); +// *result != usize::MAX && new_addr % a == 0 +// })] pub(crate) unsafe fn align_offset(p: *const T, a: usize) -> usize { // FIXME(#75598): Direct use of these intrinsics improves codegen significantly at opt-level <= // 1, where the method versions of these operations are not inlined. @@ -2237,6 +2273,10 @@ pub(crate) unsafe fn align_offset(p: *const T, a: usize) -> usize { /// /// Implementation of this function shall not panic. Ever. 
#[inline] + #[rustc_allow_const_fn_unstable(contracts)] + #[allow(unused_parens)] + #[core::contracts::requires(m.is_power_of_two() && x < m && x % 2 != 0)] + #[core::contracts::ensures(move |result| wrapping_mul(*result, x) % m == 1)] const unsafe fn mod_inv(x: usize, m: usize) -> usize { /// Multiplicative modular inverse table modulo 2⁴ = 16. /// diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs index ba78afc7ea114..14e0dbde2acf2 100644 --- a/library/core/src/ptr/mut_ptr.rs +++ b/library/core/src/ptr/mut_ptr.rs @@ -349,6 +349,15 @@ impl *mut T { #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")] #[inline(always)] #[track_caller] + // Note: It is the caller's responsibility to ensure that `self` is non-null and properly aligned. + // These conditions are not verified as part of the preconditions. + #[rustc_allow_const_fn_unstable(contracts)] + #[allow(unused_parens)] + #[core::contracts::requires( + // Precondition 1: the computed offset `count * size_of::()` does not overflow `isize`. + // Precondition 2: adding the computed offset to `self` does not cause overflow. 
+ count.checked_mul(core::mem::size_of::() as isize).is_some_and( + |computed_offset| (self as isize).checked_add(computed_offset).is_some()))] pub const unsafe fn offset(self, count: isize) -> *mut T where T: Sized, @@ -790,6 +799,18 @@ impl *mut T { #[rustc_const_stable(feature = "const_ptr_offset_from", since = "1.65.0")] #[inline(always)] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + #[rustc_allow_const_fn_unstable(contracts)] + #[allow(unused_parens)] + #[core::contracts::requires( + // Ensuring that subtracting 'origin' from 'self' doesn't result in an overflow + (self as isize).checked_sub(origin as isize).is_some() && + // Ensuring that the distance between 'self' and 'origin' is aligned to `T` + (self as isize - origin as isize) % (mem::size_of::() as isize) == 0)] + // FIXME: requires `T` to be `'static` + // #[core::contracts::ensures( + // move |result| + // core::mem::size_of::() == 0 || + // (*result == (self as isize - origin as isize) / (mem::size_of::() as isize)))] pub const unsafe fn offset_from(self, origin: *const T) -> isize where T: Sized, @@ -924,6 +945,17 @@ impl *mut T { #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")] #[inline(always)] #[track_caller] + // Note: It is the caller's responsibility to ensure that `self` is non-null and properly + // aligned. These conditions are not verified as part of the preconditions. + #[rustc_allow_const_fn_unstable(contracts)] + #[allow(unused_parens)] + #[core::contracts::requires( + // Precondition 1: the computed offset `count * size_of::()` does not overflow `isize`. + // Precondition 2: adding the computed offset to `self` does not cause overflow. 
+ count.checked_mul(core::mem::size_of::<T>()).is_some_and( + |computed_offset| + computed_offset <= isize::MAX as usize && + (self as isize).checked_add(computed_offset as isize).is_some()))] pub const unsafe fn add(self, count: usize) -> Self where T: Sized, @@ -1030,6 +1062,17 @@ impl *mut T { #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")] #[inline(always)] #[track_caller] + // Note: It is the caller's responsibility to ensure that `self` is non-null and properly + // aligned. These conditions are not verified as part of the preconditions. + #[rustc_allow_const_fn_unstable(contracts)] + #[allow(unused_parens)] + #[core::contracts::requires( + // Precondition 1: the computed offset `count * size_of::<T>()` does not overflow `isize`. + // Precondition 2: subtracting the computed offset from `self` does not cause overflow. + count.checked_mul(core::mem::size_of::<T>()).is_some_and( + |computed_offset| + computed_offset <= isize::MAX as usize && + (self as isize).checked_sub(computed_offset as isize).is_some()))] pub const unsafe fn sub(self, count: usize) -> Self where T: Sized, diff --git a/library/core/src/ptr/non_null.rs b/library/core/src/ptr/non_null.rs index 10f83120428b9..fd83f6dc6f989 100644 --- a/library/core/src/ptr/non_null.rs +++ b/library/core/src/ptr/non_null.rs @@ -127,6 +127,9 @@ impl NonNull { #[rustc_const_stable(feature = "const_nonnull_dangling", since = "1.36.0")] #[must_use] #[inline] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::ensures( + |result: &NonNull| !result.pointer.is_null() && result.pointer.is_aligned())] pub const fn dangling() -> Self { let align = crate::ptr::Alignment::of::<T>(); NonNull::without_provenance(align.as_nonzero()) @@ -165,6 +168,10 @@ impl NonNull { #[inline] #[must_use] #[unstable(feature = "ptr_as_uninit", issue = "75402")] + // FIXME: requires `T` to be `'static` + // #[rustc_allow_const_fn_unstable(contracts)] + // #[core::contracts::ensures( + // move |result: &&'a MaybeUninit|
core::ptr::eq(*result, self.cast().as_ptr()))] pub const unsafe fn as_uninit_ref<'a>(self) -> &'a MaybeUninit { // SAFETY: the caller must guarantee that `self` meets all the // requirements for a reference. @@ -188,6 +195,10 @@ impl NonNull { #[inline] #[must_use] #[unstable(feature = "ptr_as_uninit", issue = "75402")] + // FIXME: requires `T` to be `'static` + // #[rustc_allow_const_fn_unstable(contracts)] + // #[core::contracts::ensures( + // move |result: &&mut MaybeUninit| core::ptr::eq(*result, self.cast().as_ptr()))] pub const unsafe fn as_uninit_mut<'a>(self) -> &'a mut MaybeUninit { // SAFETY: the caller must guarantee that `self` meets all the // requirements for a reference. @@ -230,6 +241,11 @@ impl NonNull { #[rustc_const_stable(feature = "const_nonnull_new_unchecked", since = "1.25.0")] #[inline] #[track_caller] + #[allow(unused_parens)] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::requires(!ptr.is_null())] + // FIXME: requires `T` to be `'static` + // #[core::contracts::ensures(move |result: &Self| result.as_ptr() == ptr)] pub const unsafe fn new_unchecked(ptr: *mut T) -> Self { // SAFETY: the caller must guarantee that `ptr` is non-null. 
unsafe { @@ -266,6 +282,12 @@ impl NonNull { #[stable(feature = "nonnull", since = "1.25.0")] #[rustc_const_stable(feature = "const_nonnull_new", since = "1.85.0")] #[inline] + // FIXME: requires `T` to be `'static` + // #[rustc_allow_const_fn_unstable(contracts)] + // #[core::contracts::ensures( + // move |result: &Option| + // result.is_some() == !ptr.is_null() && + // (result.is_none() || result.expect("ptr is null!").as_ptr() == ptr))] pub const fn new(ptr: *mut T) -> Option { if !ptr.is_null() { // SAFETY: The pointer is already checked and is not null @@ -301,6 +323,8 @@ impl NonNull { /// [`std::ptr::from_raw_parts`]: crate::ptr::from_raw_parts #[unstable(feature = "ptr_metadata", issue = "81513")] #[inline] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::ensures(|result: &NonNull| !result.pointer.is_null())] pub const fn from_raw_parts( data_pointer: NonNull, metadata: ::Metadata, @@ -318,6 +342,12 @@ impl NonNull { #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] + // FIXME: requires `T` to be `'static` + // #[rustc_allow_const_fn_unstable(contracts)] + // #[core::contracts::ensures( + // move |(data_ptr, _): &(NonNull<()>, ::Metadata)| + // !data_ptr.as_ptr().is_null() && + // self.as_ptr() as *const () == data_ptr.as_ptr() as *const ())] pub const fn to_raw_parts(self) -> (NonNull<()>, ::Metadata) { (self.cast(), super::metadata(self.as_ptr())) } @@ -330,6 +360,9 @@ impl NonNull { #[must_use] #[inline] #[stable(feature = "strict_provenance", since = "1.84.0")] + // FIXME: requires `T` to be `'static` + // #[core::contracts::ensures( + // move |result: &NonZero| result.get() == self.as_ptr() as *const() as usize)] pub fn addr(self) -> NonZero { // SAFETY: The pointer is guaranteed by the type to be non-null, // meaning that the address will be non-zero. 
@@ -358,6 +391,7 @@ impl NonNull { #[must_use] #[inline] #[stable(feature = "strict_provenance", since = "1.84.0")] + #[core::contracts::ensures(move |result: &Self| result.addr() == addr)] pub fn with_addr(self, addr: NonZero) -> Self { // SAFETY: The result of `ptr::from::with_addr` is non-null because `addr` is guaranteed to be non-zero. unsafe { NonNull::new_unchecked(self.as_ptr().with_addr(addr.get()) as *mut _) } @@ -398,6 +432,9 @@ impl NonNull { #[rustc_never_returns_null_ptr] #[must_use] #[inline(always)] + // FIXME: requires `T` to be `'static` + // #[rustc_allow_const_fn_unstable(contracts)] + // #[core::contracts::ensures(move |result: &*mut T| *result == self.pointer as *mut T)] pub const fn as_ptr(self) -> *mut T { // This is a transmute for the same reasons as `NonZero::get`. @@ -437,6 +474,9 @@ impl NonNull { #[rustc_const_stable(feature = "const_nonnull_as_ref", since = "1.73.0")] #[must_use] #[inline(always)] + // FIXME: requires `T` to be `'static` + // #[rustc_allow_const_fn_unstable(contracts)] + // #[core::contracts::ensures(move |result: &&T| core::ptr::eq(*result, self.as_ptr()))] pub const unsafe fn as_ref<'a>(&self) -> &'a T { // SAFETY: the caller must guarantee that `self` meets all the // requirements for a reference. @@ -475,6 +515,9 @@ impl NonNull { #[rustc_const_stable(feature = "const_ptr_as_ref", since = "1.83.0")] #[must_use] #[inline(always)] + // FIXME: requires `T` to be `'static` + // #[rustc_allow_const_fn_unstable(contracts)] + // #[core::contracts::ensures(|result: &&'a mut T| core::ptr::eq(*result, self.as_ptr()))] pub const unsafe fn as_mut<'a>(&mut self) -> &'a mut T { // SAFETY: the caller must guarantee that `self` meets all the // requirements for a mutable reference. 
@@ -499,6 +542,10 @@ impl NonNull { #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] + // FIXME: requires `T` to be `'static` + // #[rustc_allow_const_fn_unstable(contracts)] + // #[core::contracts::ensures( + // move |result: &NonNull| result.as_ptr().addr() == self.as_ptr().addr())] pub const fn cast(self) -> NonNull { // SAFETY: `self` is a `NonNull` pointer which is necessarily non-null unsafe { NonNull { pointer: self.as_ptr() as *mut U } } @@ -572,6 +619,15 @@ impl NonNull { #[must_use = "returns a new pointer rather than modifying its argument"] #[stable(feature = "non_null_convenience", since = "1.80.0")] #[rustc_const_stable(feature = "non_null_convenience", since = "1.80.0")] + #[rustc_allow_const_fn_unstable(contracts)] + #[allow(unused_parens)] + #[core::contracts::requires( + count.checked_mul(core::mem::size_of::() as isize).is_some() && + (self.as_ptr() as isize).checked_add( + count.wrapping_mul(core::mem::size_of::() as isize)).is_some())] + // FIXME: requires `T` to be `'static` + // #[core::contracts::ensures( + // move |result: &Self| result.as_ptr() == self.as_ptr().wrapping_offset(count))] pub const unsafe fn offset(self, count: isize) -> Self where T: Sized, @@ -598,6 +654,12 @@ impl NonNull { #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces #[stable(feature = "non_null_convenience", since = "1.80.0")] #[rustc_const_stable(feature = "non_null_convenience", since = "1.80.0")] + #[rustc_allow_const_fn_unstable(contracts)] + #[allow(unused_parens)] + #[core::contracts::requires((self.as_ptr().addr() as isize).checked_add(count).is_some())] + // FIXME: requires `T` to be `'static` + // #[core::contracts::ensures( + // move |result: &Self| result.as_ptr() == self.as_ptr().wrapping_byte_offset(count))] pub const unsafe fn byte_offset(self, count: isize) -> Self { // SAFETY: the caller must uphold the safety contract for `offset` and `byte_offset` has // the 
same safety contract. @@ -648,6 +710,16 @@ impl NonNull { #[must_use = "returns a new pointer rather than modifying its argument"] #[stable(feature = "non_null_convenience", since = "1.80.0")] #[rustc_const_stable(feature = "non_null_convenience", since = "1.80.0")] + #[rustc_allow_const_fn_unstable(contracts)] + #[allow(unused_parens)] + #[core::contracts::requires( + count.checked_mul(core::mem::size_of::()).is_some() && + count * core::mem::size_of::() <= isize::MAX as usize && + (self.pointer as isize).checked_add( + count as isize * core::mem::size_of::() as isize).is_some())] + // FIXME: requires `T` to be `'static` + // #[core::contracts::ensures( + // move |result: &Self| result.as_ptr() == unsafe { self.as_ptr().offset(count as isize) })] pub const unsafe fn add(self, count: usize) -> Self where T: Sized, @@ -725,6 +797,15 @@ impl NonNull { #[must_use = "returns a new pointer rather than modifying its argument"] #[stable(feature = "non_null_convenience", since = "1.80.0")] #[rustc_const_stable(feature = "non_null_convenience", since = "1.80.0")] + #[rustc_allow_const_fn_unstable(contracts)] + #[allow(unused_parens)] + #[core::contracts::requires( + count.checked_mul(core::mem::size_of::()).is_some() && + count * core::mem::size_of::() <= isize::MAX as usize)] + // FIXME: requires `T` to be `'static` + // #[core::contracts::ensures( + // move |result: &NonNull| + // result.as_ptr() == unsafe { self.as_ptr().offset(-(count as isize)) })] pub const unsafe fn sub(self, count: usize) -> Self where T: Sized, @@ -854,6 +935,18 @@ impl NonNull { #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces #[stable(feature = "non_null_convenience", since = "1.80.0")] #[rustc_const_stable(feature = "non_null_convenience", since = "1.80.0")] + #[rustc_allow_const_fn_unstable(contracts)] + #[allow(unused_parens)] + #[core::contracts::requires( + self.as_ptr().addr().checked_sub(origin.as_ptr().addr()).is_some_and( + |distance| distance % 
core::mem::size_of::() == 0) || + origin.as_ptr().addr().checked_sub(self.as_ptr().addr()).is_some_and( + |distance| distance % core::mem::size_of::() == 0))] + // FIXME: requires `T` to be `'static` + // #[core::contracts::ensures( + // move |result: &isize| + // *result == (self.as_ptr() as isize - origin.as_ptr() as isize) / + // core::mem::size_of::() as isize)] pub const unsafe fn offset_from(self, origin: NonNull) -> isize where T: Sized, @@ -875,6 +968,12 @@ impl NonNull { #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces #[stable(feature = "non_null_convenience", since = "1.80.0")] #[rustc_const_stable(feature = "non_null_convenience", since = "1.80.0")] + // FIXME: requires `T` to be `'static` + // #[rustc_allow_const_fn_unstable(contracts)] + // #[core::contracts::ensures( + // move |result: &isize| + // *result == + // unsafe { (self.as_ptr() as *const u8).offset_from(origin.as_ptr() as *const u8) })] pub const unsafe fn byte_offset_from(self, origin: NonNull) -> isize { // SAFETY: the caller must uphold the safety contract for `byte_offset_from`. 
unsafe { self.as_ptr().byte_offset_from(origin.as_ptr()) } @@ -945,6 +1044,16 @@ impl NonNull { #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces #[stable(feature = "ptr_sub_ptr", since = "1.87.0")] #[rustc_const_stable(feature = "const_ptr_sub_ptr", since = "1.87.0")] + #[rustc_allow_const_fn_unstable(contracts)] + #[allow(unused_parens)] + #[core::contracts::requires( + self.as_ptr().addr().checked_sub(subtracted.as_ptr().addr()).is_some() && + (self.as_ptr().addr()) >= (subtracted.as_ptr().addr()) && + (self.as_ptr().addr() - subtracted.as_ptr().addr()) % core::mem::size_of::() == 0)] + // FIXME: requires `T` to be `'static` + // #[core::contracts::ensures( + // move |result: &usize| + // *result == unsafe { self.as_ptr().offset_from(subtracted.as_ptr()) } as usize)] pub const unsafe fn offset_from_unsigned(self, subtracted: NonNull) -> usize where T: Sized, @@ -1043,6 +1152,11 @@ impl NonNull { #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces #[stable(feature = "non_null_convenience", since = "1.80.0")] #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.83.0")] + #[rustc_allow_const_fn_unstable(contracts)] + #[allow(unused_parens)] + #[core::contracts::requires( + count.checked_mul(core::mem::size_of::()).map_or_else( + || false, |size| size <= isize::MAX as usize))] pub const unsafe fn copy_to(self, dest: NonNull, count: usize) where T: Sized, @@ -1063,6 +1177,11 @@ impl NonNull { #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces #[stable(feature = "non_null_convenience", since = "1.80.0")] #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.83.0")] + #[rustc_allow_const_fn_unstable(contracts)] + #[allow(unused_parens)] + #[core::contracts::requires( + count.checked_mul(core::mem::size_of::()).map_or_else( + || false, |size| size <= isize::MAX as usize))] pub const unsafe fn copy_to_nonoverlapping(self, dest: NonNull, 
count: usize) where T: Sized, @@ -1083,6 +1202,11 @@ impl NonNull { #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces #[stable(feature = "non_null_convenience", since = "1.80.0")] #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.83.0")] + #[rustc_allow_const_fn_unstable(contracts)] + #[allow(unused_parens)] + #[core::contracts::requires( + count.checked_mul(core::mem::size_of::()).map_or_else( + || false, |size| size <= isize::MAX as usize))] pub const unsafe fn copy_from(self, src: NonNull, count: usize) where T: Sized, @@ -1103,6 +1227,11 @@ impl NonNull { #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces #[stable(feature = "non_null_convenience", since = "1.80.0")] #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.83.0")] + #[rustc_allow_const_fn_unstable(contracts)] + #[allow(unused_parens)] + #[core::contracts::requires( + count.checked_mul(core::mem::size_of::()).map_or_else( + || false, |size| size <= isize::MAX as usize))] pub const unsafe fn copy_from_nonoverlapping(self, src: NonNull, count: usize) where T: Sized, @@ -1152,6 +1281,11 @@ impl NonNull { #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces #[stable(feature = "non_null_convenience", since = "1.80.0")] #[rustc_const_stable(feature = "const_ptr_write", since = "1.83.0")] + #[rustc_allow_const_fn_unstable(contracts)] + #[allow(unused_parens)] + #[core::contracts::requires( + count.checked_mul(core::mem::size_of::() as usize).is_some_and( + |byte_count| byte_count.wrapping_add(self.as_ptr() as usize) <= isize::MAX as usize))] pub const unsafe fn write_bytes(self, val: u8, count: usize) where T: Sized, @@ -1285,6 +1419,38 @@ impl NonNull { #[inline] #[must_use] #[stable(feature = "non_null_convenience", since = "1.80.0")] + // FIXME: requires `T` to be `'static` + // #[core::contracts::ensures(move |result| { + // // Post-condition reference: 
https://github.com/model-checking/verify-rust-std/pull/69/files + // let stride = crate::mem::size_of::(); + // // ZSTs + // if stride == 0 { + // if self.pointer.addr() % align == 0 { + // return *result == 0; + // } else { + // return *result == usize::MAX; + // } + // } + // // In this case, the pointer cannot be aligned + // if (align % stride == 0) && (self.pointer.addr() % stride != 0) { + // return *result == usize::MAX; + // } + // // Checking if the answer should indeed be usize::MAX when a % stride != 0 requires + // // computing gcd(align, stride), which could be done using cttz as the implementation of + // // ptr::align_offset does. + // if align % stride != 0 && *result == usize::MAX { + // return true; + // } + // // If we reach this case, either: + // // - align % stride == 0 and self.pointer.addr() % stride == 0, so it is definitely + // // possible to align the pointer + // // - align % stride != 0 and result != usize::MAX, so align_offset is claiming that it's + // // possible to align the pointer + // // Check that applying the returned result does indeed produce an aligned address + // let product = usize::wrapping_mul(*result, stride); + // let new_addr = usize::wrapping_add(product, self.pointer.addr()); + // *result != usize::MAX && new_addr % align == 0 + // })] pub fn align_offset(self, align: usize) -> usize where T: Sized, @@ -1319,6 +1485,9 @@ impl NonNull { #[inline] #[must_use] #[stable(feature = "pointer_is_aligned", since = "1.79.0")] + // FIXME: requires `T` to be `'static` + // #[core::contracts::ensures( + // move |result: &bool| *result == (self.as_ptr().addr() % core::mem::align_of::() == 0))] pub fn is_aligned(self) -> bool where T: Sized, @@ -1359,6 +1528,10 @@ impl NonNull { #[inline] #[must_use] #[unstable(feature = "pointer_is_aligned_to", issue = "96284")] + #[allow(unused_parens)] + #[core::contracts::requires(align.is_power_of_two())] + // FIXME: requires `T` to be `'static` + // #[core::contracts::ensures(move |result: 
&bool| *result == (self.as_ptr().addr() % align == 0))] pub fn is_aligned_to(self, align: usize) -> bool { self.as_ptr().is_aligned_to(align) } @@ -1412,6 +1585,13 @@ impl NonNull<[T]> { #[rustc_const_stable(feature = "const_slice_from_raw_parts_mut", since = "1.83.0")] #[must_use] #[inline] + // FIXME: requires `T` to be `'static` + // #[rustc_allow_const_fn_unstable(contracts)] + // #[core::contracts::ensures( + // move |result: &NonNull<[T]>| + // !result.pointer.is_null() && + // result.pointer as *const T == data.pointer && + // unsafe { result.as_ref() }.len() == len)] pub const fn slice_from_raw_parts(data: NonNull, len: usize) -> Self { // SAFETY: `data` is a `NonNull` pointer which is necessarily non-null unsafe { Self::new_unchecked(super::slice_from_raw_parts_mut(data.as_ptr(), len)) } @@ -1472,6 +1652,10 @@ impl NonNull<[T]> { #[inline] #[must_use] #[unstable(feature = "slice_ptr_get", issue = "74265")] + #[rustc_allow_const_fn_unstable(contracts)] + // FIXME: requires `T` to be `'static` + // #[core::contracts::ensures( + // move |result: &NonNull| result.as_ptr().addr() == self.as_ptr().addr())] pub const fn as_non_null_ptr(self) -> NonNull { self.cast() } @@ -1491,6 +1675,9 @@ impl NonNull<[T]> { #[must_use] #[unstable(feature = "slice_ptr_get", issue = "74265")] #[rustc_never_returns_null_ptr] + // FIXME: requires `T` to be `'static` + // #[rustc_allow_const_fn_unstable(contracts)] + // #[core::contracts::ensures(move |result: &*mut T| *result == self.pointer as *mut T)] pub const fn as_mut_ptr(self) -> *mut T { self.as_non_null_ptr().as_ptr() } @@ -1535,6 +1722,19 @@ impl NonNull<[T]> { #[inline] #[must_use] #[unstable(feature = "ptr_as_uninit", issue = "75402")] + #[rustc_allow_const_fn_unstable(contracts)] + #[allow(unused_parens)] + #[core::contracts::requires( + // Ensure the pointer is properly aligned + self.as_ptr().cast::().align_offset(core::mem::align_of::()) == 0 && + // Ensure the slice size does not exceed isize::MAX + 
self.len().checked_mul(core::mem::size_of::()).is_some() && + self.len() * core::mem::size_of::() <= isize::MAX as usize && + self.as_ptr().addr().checked_add(self.len() * core::mem::size_of::()).is_some())] + // FIXME: requires `T` to be `'static` + // #[core::contracts::ensures( + // move |result: &&[MaybeUninit]| + // result.len() == self.len() && core::ptr::eq(result.as_ptr(), self.cast().as_ptr()))] pub const unsafe fn as_uninit_slice<'a>(self) -> &'a [MaybeUninit] { // SAFETY: the caller must uphold the safety contract for `as_uninit_slice`. unsafe { slice::from_raw_parts(self.cast().as_ptr(), self.len()) } @@ -1599,6 +1799,19 @@ impl NonNull<[T]> { #[inline] #[must_use] #[unstable(feature = "ptr_as_uninit", issue = "75402")] + #[rustc_allow_const_fn_unstable(contracts)] + #[allow(unused_parens)] + #[core::contracts::requires( + // Ensure the pointer is properly aligned + self.as_ptr().cast::().align_offset(core::mem::align_of::()) == 0 && + // Ensure the slice size does not exceed isize::MAX + self.len().checked_mul(core::mem::size_of::()).is_some() && + self.len() * core::mem::size_of::() <= isize::MAX as usize && + self.as_ptr().addr().checked_add(self.len() * core::mem::size_of::()).is_some())] + // FIXME: requires `T` to be `'static` + // #[core::contracts::ensures( + // move |result: &&mut [MaybeUninit]| + // result.len() == self.len() && core::ptr::eq(result.as_ptr(), self.cast().as_ptr()))] pub const unsafe fn as_uninit_slice_mut<'a>(self) -> &'a mut [MaybeUninit] { // SAFETY: the caller must uphold the safety contract for `as_uninit_slice_mut`. unsafe { slice::from_raw_parts_mut(self.cast().as_ptr(), self.len()) } diff --git a/library/core/src/ptr/unique.rs b/library/core/src/ptr/unique.rs index cdc8b6cc936df..f69cdeb441ab0 100644 --- a/library/core/src/ptr/unique.rs +++ b/library/core/src/ptr/unique.rs @@ -83,6 +83,11 @@ impl Unique { /// /// `ptr` must be non-null. 
#[inline] + #[rustc_allow_const_fn_unstable(contracts)] + #[allow(unused_parens)] + #[core::contracts::requires(!ptr.is_null())] + // FIXME: requires `T` to be `'static` + // #[core::contracts::ensures(move |result: &Unique| result.as_ptr() == ptr)] pub const unsafe fn new_unchecked(ptr: *mut T) -> Self { // SAFETY: the caller must guarantee that `ptr` is non-null. unsafe { Unique { pointer: NonNull::new_unchecked(ptr), _marker: PhantomData } } @@ -90,6 +95,11 @@ impl Unique { /// Creates a new `Unique` if `ptr` is non-null. #[inline] + #[rustc_allow_const_fn_unstable(contracts)] + // FIXME: requires `T` to be `'static` + // #[core::contracts::ensures( + // move |result: &Option>| + // result.is_none() == ptr.is_null() && (result.is_none() || result.unwrap().as_ptr() == ptr))] pub const fn new(ptr: *mut T) -> Option { if let Some(pointer) = NonNull::new(ptr) { Some(Unique { pointer, _marker: PhantomData }) @@ -107,6 +117,8 @@ impl Unique { /// Acquires the underlying `*mut` pointer. #[must_use = "`self` will be dropped if the result is not used"] #[inline] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::ensures(|result: &*mut T| !result.is_null())] pub const fn as_ptr(self) -> *mut T { self.pointer.as_ptr() } @@ -114,6 +126,9 @@ impl Unique { /// Acquires the underlying `*mut` pointer. 
#[must_use = "`self` will be dropped if the result is not used"] #[inline] + // FIXME: requires `T` to be `'static` + // #[rustc_allow_const_fn_unstable(contracts)] + // #[core::contracts::ensures(move |result: &NonNull| result.as_ptr() == self.pointer.as_ptr())] pub const fn as_non_null_ptr(self) -> NonNull { self.pointer } diff --git a/library/core/src/range/iter.rs b/library/core/src/range/iter.rs index 24efd4a204a5f..117e7af75b8ad 100644 --- a/library/core/src/range/iter.rs +++ b/library/core/src/range/iter.rs @@ -104,6 +104,8 @@ impl Iterator for IterRange { } #[inline] + #[allow(unused_parens)] + #[core::contracts::requires(idx < self.size_hint().0)] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item where Self: TrustedRandomAccessNoCoerce, diff --git a/library/core/src/slice/iter.rs b/library/core/src/slice/iter.rs index ae910e0525209..0aa2cd6d46d22 100644 --- a/library/core/src/slice/iter.rs +++ b/library/core/src/slice/iter.rs @@ -1403,6 +1403,8 @@ impl<'a, T> Iterator for Windows<'a, T> { } } + #[allow(unused_parens)] + #[core::contracts::requires(idx < self.len())] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item { // SAFETY: since the caller guarantees that `i` is in bounds, // which means that `i` cannot overflow an `isize`, and the @@ -1560,6 +1562,8 @@ impl<'a, T> Iterator for Chunks<'a, T> { } } + #[allow(unused_parens)] + #[core::contracts::requires(idx < self.len())] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item { let start = idx * self.chunk_size; // SAFETY: the caller guarantees that `i` is in bounds, @@ -1749,6 +1753,8 @@ impl<'a, T> Iterator for ChunksMut<'a, T> { } } + #[allow(unused_parens)] + #[core::contracts::requires(idx < self.len())] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item { let start = idx * self.chunk_size; // SAFETY: see comments for `Chunks::__iterator_get_unchecked` and `self.v`. 
@@ -1947,6 +1953,8 @@ impl<'a, T> Iterator for ChunksExact<'a, T> { self.next_back() } + #[allow(unused_parens)] + #[core::contracts::requires(idx < self.len())] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item { let start = idx * self.chunk_size; // SAFETY: mostly identical to `Chunks::__iterator_get_unchecked`. @@ -2108,6 +2116,8 @@ impl<'a, T> Iterator for ChunksExactMut<'a, T> { self.next_back() } + #[allow(unused_parens)] + #[core::contracts::requires(idx < self.len())] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item { let start = idx * self.chunk_size; // SAFETY: see comments for `Chunks::__iterator_get_unchecked` and `self.v`. @@ -2414,6 +2424,8 @@ impl<'a, T> Iterator for RChunks<'a, T> { } } + #[allow(unused_parens)] + #[core::contracts::requires(idx < self.len())] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item { let end = self.v.len() - idx * self.chunk_size; let start = match end.checked_sub(self.chunk_size) { @@ -2594,6 +2606,8 @@ impl<'a, T> Iterator for RChunksMut<'a, T> { } } + #[allow(unused_parens)] + #[core::contracts::requires(idx < self.len())] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item { let end = self.v.len() - idx * self.chunk_size; let start = match end.checked_sub(self.chunk_size) { @@ -2787,6 +2801,8 @@ impl<'a, T> Iterator for RChunksExact<'a, T> { self.next_back() } + #[allow(unused_parens)] + #[core::contracts::requires(idx < self.len())] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item { let end = self.v.len() - idx * self.chunk_size; let start = end - self.chunk_size; @@ -2953,6 +2969,8 @@ impl<'a, T> Iterator for RChunksExactMut<'a, T> { self.next_back() } + #[allow(unused_parens)] + #[core::contracts::requires(idx < self.len())] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item { let end = self.v.len() - idx * self.chunk_size; let start = end - self.chunk_size; diff --git 
a/library/core/src/slice/iter/macros.rs b/library/core/src/slice/iter/macros.rs index 7c1ed3fe8a246..311e32b72212d 100644 --- a/library/core/src/slice/iter/macros.rs +++ b/library/core/src/slice/iter/macros.rs @@ -77,6 +77,7 @@ macro_rules! iterator { /// /// The iterator must not be empty #[inline] + #[core::contracts::requires(!is_empty!(self))] unsafe fn next_back_unchecked(&mut self) -> $elem { // SAFETY: the caller promised it's not empty, so // the offsetting is in-bounds and there's an element to return. @@ -96,6 +97,7 @@ macro_rules! iterator { // returning the old start. // Unsafe because the offset must not exceed `self.len()`. #[inline(always)] + #[core::contracts::requires(offset <= len!(self))] unsafe fn post_inc_start(&mut self, offset: usize) -> NonNull { let old = self.ptr; @@ -115,6 +117,7 @@ macro_rules! iterator { // returning the new end. // Unsafe because the offset must not exceed `self.len()`. #[inline(always)] + #[core::contracts::requires(offset <= len!(self))] unsafe fn pre_dec_end(&mut self, offset: usize) -> NonNull { if_zst!(mut self, // SAFETY: By our precondition, `offset` can be at most the @@ -392,6 +395,7 @@ macro_rules! iterator { } #[inline] + #[core::contracts::requires(idx < len!(self))] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item { // SAFETY: the caller must guarantee that `i` is in bounds of // the underlying slice, so `i` cannot overflow an `isize`, and @@ -460,6 +464,7 @@ macro_rules! iterator { impl<'a, T> UncheckedIterator for $name<'a, T> { #[inline] + #[core::contracts::requires(!is_empty!(self))] unsafe fn next_unchecked(&mut self) -> $elem { // SAFETY: The caller promised there's at least one more item. 
unsafe { diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs index f7f5ee819b2e4..166a61a20d9f2 100644 --- a/library/core/src/slice/mod.rs +++ b/library/core/src/slice/mod.rs @@ -4053,6 +4053,25 @@ impl [T] { /// ``` #[stable(feature = "slice_align_to", since = "1.30.0")] #[must_use] + // FIXME: requires `&self` to be `'static` + // #[core::contracts::ensures( + // move |(prefix, middle, suffix): &(&[T], &[U], &[T])| + // // The following clause guarantees that middle is of maximum size within self If U or T are + // // ZSTs, then middle has size zero, so we adapt the check in that case + // (((U::IS_ZST || T::IS_ZST) && prefix.len() == self.len()) || ( + // prefix.len() * size_of::() < align_of::() && + // suffix.len() * size_of::() < size_of::() + // )) && + // // Either align_to just returns self in the prefix, or the 3 returned slices should be + // // sequential, contiguous, and have same total length as self + // prefix.as_ptr() == self.as_ptr() && ( + // prefix.len() == self.len() || ( + // unsafe { prefix.as_ptr().add(prefix.len()) } as *const u8 == + // middle.as_ptr() as *const u8 && + // unsafe { middle.as_ptr().add(middle.len()) } as *const u8 == + // suffix.as_ptr() as *const u8 && + // unsafe { suffix.as_ptr().add(suffix.len()) } == + // unsafe { self.as_ptr().add(self.len()) })))] pub unsafe fn align_to(&self) -> (&[T], &[U], &[T]) { // Note that most of this function will be constant-evaluated, if U::IS_ZST || T::IS_ZST { diff --git a/library/core/src/str/iter.rs b/library/core/src/str/iter.rs index d2985d8a18669..09da2cccdcce0 100644 --- a/library/core/src/str/iter.rs +++ b/library/core/src/str/iter.rs @@ -356,6 +356,8 @@ impl Iterator for Bytes<'_> { } #[inline] + #[allow(unused_parens)] + #[core::contracts::requires(idx < self.0.len())] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> u8 { // SAFETY: the caller must uphold the safety contract // for `Iterator::__iterator_get_unchecked`. 
diff --git a/library/core/src/str/pattern.rs b/library/core/src/str/pattern.rs index e116b13838323..528bba04351bb 100644 --- a/library/core/src/str/pattern.rs +++ b/library/core/src/str/pattern.rs @@ -1921,6 +1921,8 @@ fn simd_contains(needle: &str, haystack: &str) -> Option { all(target_arch = "loongarch64", target_feature = "lsx") ))] #[inline] +#[allow(unused_parens)] +#[core::contracts::requires(x.len() == y.len())] unsafe fn small_slice_eq(x: &[u8], y: &[u8]) -> bool { debug_assert_eq!(x.len(), y.len()); // This function is adapted from diff --git a/library/core/src/time.rs b/library/core/src/time.rs index f721fcd6156cf..05af55fc1a461 100644 --- a/library/core/src/time.rs +++ b/library/core/src/time.rs @@ -191,6 +191,8 @@ impl Duration { #[inline] #[must_use] #[rustc_const_stable(feature = "duration_consts_2", since = "1.58.0")] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::ensures(|duration: &Duration| duration.nanos.as_inner() < NANOS_PER_SEC)] pub const fn new(secs: u64, nanos: u32) -> Duration { if nanos < NANOS_PER_SEC { // SAFETY: nanos < NANOS_PER_SEC, therefore nanos is within the valid range @@ -221,6 +223,10 @@ impl Duration { #[must_use] #[inline] #[rustc_const_stable(feature = "duration_consts", since = "1.32.0")] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::ensures( + move |duration: &Duration| + duration.nanos.as_inner() < NANOS_PER_SEC && duration.secs == secs)] pub const fn from_secs(secs: u64) -> Duration { Duration { secs, nanos: Nanoseconds::ZERO } } @@ -241,6 +247,8 @@ impl Duration { #[must_use] #[inline] #[rustc_const_stable(feature = "duration_consts", since = "1.32.0")] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::ensures(|duration: &Duration| duration.nanos.as_inner() < NANOS_PER_SEC)] pub const fn from_millis(millis: u64) -> Duration { let secs = millis / MILLIS_PER_SEC; let subsec_millis = (millis % MILLIS_PER_SEC) as u32; @@ -267,6 +275,8 @@ impl Duration { #[must_use] 
#[inline] #[rustc_const_stable(feature = "duration_consts", since = "1.32.0")] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::ensures(|duration: &Duration| duration.nanos.as_inner() < NANOS_PER_SEC)] pub const fn from_micros(micros: u64) -> Duration { let secs = micros / MICROS_PER_SEC; let subsec_micros = (micros % MICROS_PER_SEC) as u32; @@ -298,6 +308,8 @@ impl Duration { #[must_use] #[inline] #[rustc_const_stable(feature = "duration_consts", since = "1.32.0")] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::ensures(|duration: &Duration| duration.nanos.as_inner() < NANOS_PER_SEC)] pub const fn from_nanos(nanos: u64) -> Duration { const NANOS_PER_SEC: u64 = self::NANOS_PER_SEC as u64; let secs = nanos / NANOS_PER_SEC; @@ -504,6 +516,9 @@ impl Duration { #[rustc_const_stable(feature = "duration_consts", since = "1.32.0")] #[must_use] #[inline] + // FIXME: requires `&self` to be `'static` + // #[rustc_allow_const_fn_unstable(contracts)] + // #[core::contracts::ensures(move |secs: &u64| *secs == self.secs)] pub const fn as_secs(&self) -> u64 { self.secs } @@ -527,6 +542,9 @@ impl Duration { #[rustc_const_stable(feature = "duration_consts", since = "1.32.0")] #[must_use] #[inline] + // FIXME: requires `&self` to be `'static` + // #[rustc_allow_const_fn_unstable(contracts)] + // #[core::contracts::ensures(move |ms| *ms == self.nanos.as_inner() / NANOS_PER_MILLI)] pub const fn subsec_millis(&self) -> u32 { self.nanos.as_inner() / NANOS_PER_MILLI } @@ -550,6 +568,9 @@ impl Duration { #[rustc_const_stable(feature = "duration_consts", since = "1.32.0")] #[must_use] #[inline] + // FIXME: requires `&self` to be `'static` + // #[rustc_allow_const_fn_unstable(contracts)] + // #[core::contracts::ensures(|ms| *ms == self.nanos.as_inner() / NANOS_PER_MICRO)] pub const fn subsec_micros(&self) -> u32 { self.nanos.as_inner() / NANOS_PER_MICRO } @@ -573,6 +594,9 @@ impl Duration { #[rustc_const_stable(feature = "duration_consts", since = 
"1.32.0")] #[must_use] #[inline] + // FIXME: requires `&self` to be `'static` + // #[rustc_allow_const_fn_unstable(contracts)] + // #[core::contracts::ensures(|nanos| *nanos == self.nanos.as_inner())] pub const fn subsec_nanos(&self) -> u32 { self.nanos.as_inner() } @@ -591,6 +615,12 @@ impl Duration { #[rustc_const_stable(feature = "duration_as_u128", since = "1.33.0")] #[must_use] #[inline] + // FIXME: requires `&self` to be `'static` + // #[rustc_allow_const_fn_unstable(contracts)] + // #[core::contracts::ensures( + // |ms| + // *ms == self.secs as u128 * MILLIS_PER_SEC as u128 + + // (self.nanos.as_inner() / NANOS_PER_MILLI) as u128)] pub const fn as_millis(&self) -> u128 { self.secs as u128 * MILLIS_PER_SEC as u128 + (self.nanos.as_inner() / NANOS_PER_MILLI) as u128 @@ -610,6 +640,12 @@ impl Duration { #[rustc_const_stable(feature = "duration_as_u128", since = "1.33.0")] #[must_use] #[inline] + // FIXME: requires `&self` to be `'static` + // #[rustc_allow_const_fn_unstable(contracts)] + // #[core::contracts::ensures( + // |ms| + // *ms == self.secs as u128 * MICROS_PER_SEC as u128 + + // (self.nanos.as_inner() / NANOS_PER_MICRO) as u128)] pub const fn as_micros(&self) -> u128 { self.secs as u128 * MICROS_PER_SEC as u128 + (self.nanos.as_inner() / NANOS_PER_MICRO) as u128 @@ -668,6 +704,10 @@ impl Duration { without modifying the original"] #[inline] #[rustc_const_stable(feature = "duration_consts_2", since = "1.58.0")] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::ensures( + |duration: &Option| + duration.is_none() || duration.unwrap().nanos.as_inner() < NANOS_PER_SEC)] pub const fn checked_add(self, rhs: Duration) -> Option { if let Some(mut secs) = self.secs.checked_add(rhs.secs) { let mut nanos = self.nanos.as_inner() + rhs.nanos.as_inner(); @@ -726,6 +766,10 @@ impl Duration { without modifying the original"] #[inline] #[rustc_const_stable(feature = "duration_consts_2", since = "1.58.0")] + #[rustc_allow_const_fn_unstable(contracts)] + 
#[core::contracts::ensures( + |duration: &Option| + duration.is_none() || duration.unwrap().nanos.as_inner() < NANOS_PER_SEC)] pub const fn checked_sub(self, rhs: Duration) -> Option { if let Some(mut secs) = self.secs.checked_sub(rhs.secs) { let nanos = if self.nanos.as_inner() >= rhs.nanos.as_inner() { @@ -782,6 +826,10 @@ impl Duration { without modifying the original"] #[inline] #[rustc_const_stable(feature = "duration_consts_2", since = "1.58.0")] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::ensures( + |duration: &Option| + duration.is_none() || duration.unwrap().nanos.as_inner() < NANOS_PER_SEC)] pub const fn checked_mul(self, rhs: u32) -> Option { // Multiply nanoseconds as u64, because it cannot overflow that way. let total_nanos = self.nanos.as_inner() as u64 * rhs as u64; @@ -838,6 +886,10 @@ impl Duration { without modifying the original"] #[inline] #[rustc_const_stable(feature = "duration_consts_2", since = "1.58.0")] + #[rustc_allow_const_fn_unstable(contracts)] + #[core::contracts::ensures( + move |duration: &Option| + rhs == 0 || duration.unwrap().nanos.as_inner() < NANOS_PER_SEC)] pub const fn checked_div(self, rhs: u32) -> Option { if rhs != 0 { let (secs, extra_secs) = (self.secs / (rhs as u64), self.secs % (rhs as u64)); diff --git a/library/std/src/alloc.rs b/library/std/src/alloc.rs index 1d61630269ac3..ccf2d785a3d93 100644 --- a/library/std/src/alloc.rs +++ b/library/std/src/alloc.rs @@ -151,6 +151,12 @@ impl System { // SAFETY: Same as `Allocator::grow` #[inline] + #[allow(unused_parens)] + #[core::contracts::requires( + new_layout.size() >= old_layout.size() && + ptr.as_ptr().is_aligned_to(old_layout.align()) && + (old_layout.size() == 0 || old_layout.align() != 0) && + (new_layout.size() == 0 || new_layout.align() != 0))] unsafe fn grow_impl( &self, ptr: NonNull, @@ -213,6 +219,8 @@ unsafe impl Allocator for System { } #[inline] + #[allow(unused_parens)] + #[core::contracts::requires(layout.size() != 0)] unsafe fn 
deallocate(&self, ptr: NonNull, layout: Layout) { if layout.size() != 0 { // SAFETY: `layout` is non-zero in size, @@ -222,6 +230,8 @@ unsafe impl Allocator for System { } #[inline] + #[allow(unused_parens)] + #[core::contracts::requires(new_layout.size() >= old_layout.size())] unsafe fn grow( &self, ptr: NonNull, @@ -233,6 +243,8 @@ unsafe impl Allocator for System { } #[inline] + #[allow(unused_parens)] + #[core::contracts::requires(new_layout.size() >= old_layout.size())] unsafe fn grow_zeroed( &self, ptr: NonNull, @@ -244,6 +256,8 @@ unsafe impl Allocator for System { } #[inline] + #[allow(unused_parens)] + #[core::contracts::requires(new_layout.size() <= old_layout.size())] unsafe fn shrink( &self, ptr: NonNull, @@ -394,6 +408,8 @@ pub mod __default_lib_allocator { // ABI #[rustc_std_internal_symbol] + #[allow(unused_parens)] + #[core::contracts::requires(align.is_power_of_two())] pub unsafe extern "C" fn __rdl_alloc(size: usize, align: usize) -> *mut u8 { // SAFETY: see the guarantees expected by `Layout::from_size_align` and // `GlobalAlloc::alloc`. @@ -404,6 +420,8 @@ pub mod __default_lib_allocator { } #[rustc_std_internal_symbol] + #[allow(unused_parens)] + #[core::contracts::requires(align.is_power_of_two())] pub unsafe extern "C" fn __rdl_dealloc(ptr: *mut u8, size: usize, align: usize) { // SAFETY: see the guarantees expected by `Layout::from_size_align` and // `GlobalAlloc::dealloc`. 
@@ -411,6 +429,8 @@ pub mod __default_lib_allocator { } #[rustc_std_internal_symbol] + #[allow(unused_parens)] + #[core::contracts::requires(align.is_power_of_two())] pub unsafe extern "C" fn __rdl_realloc( ptr: *mut u8, old_size: usize, @@ -426,6 +446,8 @@ pub mod __default_lib_allocator { } #[rustc_std_internal_symbol] + #[allow(unused_parens)] + #[core::contracts::requires(align.is_power_of_two())] pub unsafe extern "C" fn __rdl_alloc_zeroed(size: usize, align: usize) -> *mut u8 { // SAFETY: see the guarantees expected by `Layout::from_size_align` and // `GlobalAlloc::alloc_zeroed`. diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs index da41c1216c4d5..58f0325612442 100644 --- a/library/std/src/lib.rs +++ b/library/std/src/lib.rs @@ -256,6 +256,8 @@ #![deny(ffi_unwind_calls)] // std may use features in a platform-specific way #![allow(unused_features)] +// permit use of experimental feature contracts +#![allow(incomplete_features)] // // Features: #![cfg_attr(test, feature(internal_output_capture, print_internals, update_panic_count, rt))] @@ -281,6 +283,7 @@ #![feature(cfi_encoding)] #![feature(char_max_len)] #![feature(const_trait_impl)] +#![feature(contracts)] #![feature(core_float_math)] #![feature(decl_macro)] #![feature(deprecated_suggestion)] diff --git a/library/std/src/sync/mpmc/context.rs b/library/std/src/sync/mpmc/context.rs index 6b2f4cb6ffd29..2ebb5a3b9b0c7 100644 --- a/library/std/src/sync/mpmc/context.rs +++ b/library/std/src/sync/mpmc/context.rs @@ -116,6 +116,8 @@ impl Context { /// # Safety /// This may only be called from the thread this `Context` belongs to. #[inline] + #[allow(unused_parens)] + #[core::contracts::requires(self.thread_id() == current_thread_id())] pub unsafe fn wait_until(&self, deadline: Option) -> Selected { loop { // Check whether an operation has been selected. 
diff --git a/tests/mir-opt/dataflow-const-prop/default_boxed_slice.main.DataflowConstProp.64bit.panic-unwind.diff b/tests/mir-opt/dataflow-const-prop/default_boxed_slice.main.DataflowConstProp.64bit.panic-unwind.diff index 6c59f5e3e2e86..00038a4e8de05 100644 --- a/tests/mir-opt/dataflow-const-prop/default_boxed_slice.main.DataflowConstProp.64bit.panic-unwind.diff +++ b/tests/mir-opt/dataflow-const-prop/default_boxed_slice.main.DataflowConstProp.64bit.panic-unwind.diff @@ -16,23 +16,28 @@ scope 4 (inlined Unique::<[bool; 0]>::dangling) { let mut _5: std::ptr::NonNull<[bool; 0]>; scope 5 (inlined NonNull::<[bool; 0]>::dangling) { - let mut _6: std::num::NonZero; + let mut _6: std::ptr::NonNull<[bool; 0]>; + let mut _7: std::num::NonZero; scope 6 { - scope 8 (inlined std::ptr::Alignment::as_nonzero) { - } - scope 9 (inlined NonNull::<[bool; 0]>::without_provenance) { - let _7: *const [bool; 0]; - scope 10 { - } - scope 11 (inlined NonZero::::get) { + scope 7 { + scope 10 (inlined std::ptr::Alignment::as_nonzero) { } - scope 12 (inlined std::ptr::without_provenance::<[bool; 0]>) { - scope 13 (inlined without_provenance_mut::<[bool; 0]>) { + scope 11 (inlined NonNull::<[bool; 0]>::without_provenance) { + let _8: *const [bool; 0]; + scope 12 { + } + scope 13 (inlined NonZero::::get) { + } + scope 14 (inlined std::ptr::without_provenance::<[bool; 0]>) { + scope 15 (inlined without_provenance_mut::<[bool; 0]>) { + } } } } + scope 9 (inlined std::ptr::Alignment::of::<[bool; 0]>) { + } } - scope 7 (inlined std::ptr::Alignment::of::<[bool; 0]>) { + scope 8 (inlined core::contracts::build_check_ensures::, {closure@NonNull<[bool; 0]>::dangling::{closure#0}}>) { } } } @@ -45,22 +50,14 @@ StorageLive(_4); StorageLive(_5); StorageLive(_6); - _6 = const NonZero::(core::num::niche_types::NonZeroUsizeInner(1_usize)); StorageLive(_7); - _7 = const {0x1 as *const [bool; 0]}; - _5 = const NonNull::<[bool; 0]> {{ pointer: {0x1 as *const [bool; 0]} }}; + _7 = const 
NonZero::(core::num::niche_types::NonZeroUsizeInner(1_usize)); + StorageLive(_8); + _8 = const {0x1 as *const [bool; 0]}; + _6 = const NonNull::<[bool; 0]> {{ pointer: {0x1 as *const [bool; 0]} }}; + StorageDead(_8); StorageDead(_7); - StorageDead(_6); - _4 = const Unique::<[bool; 0]> {{ pointer: NonNull::<[bool; 0]> {{ pointer: {0x1 as *const [bool; 0]} }}, _marker: PhantomData::<[bool; 0]> }}; - StorageDead(_5); - _3 = const Unique::<[bool]> {{ pointer: NonNull::<[bool]> {{ pointer: Indirect { alloc_id: ALLOC0, offset: Size(0 bytes) }: *const [bool] }}, _marker: PhantomData::<[bool]> }}; - StorageDead(_4); - _2 = const Box::<[bool]>(Unique::<[bool]> {{ pointer: NonNull::<[bool]> {{ pointer: Indirect { alloc_id: ALLOC1, offset: Size(0 bytes) }: *const [bool] }}, _marker: PhantomData::<[bool]> }}, std::alloc::Global); - StorageDead(_3); - _1 = const A {{ foo: Box::<[bool]>(Unique::<[bool]> {{ pointer: NonNull::<[bool]> {{ pointer: Indirect { alloc_id: ALLOC2, offset: Size(0 bytes) }: *const [bool] }}, _marker: PhantomData::<[bool]> }}, std::alloc::Global) }}; - StorageDead(_2); - _0 = const (); - drop(_1) -> [return: bb1, unwind: bb2]; + _5 = contract_check_ensures::<{closure@NonNull<[bool; 0]>::dangling::{closure#0}}, NonNull<[bool; 0]>>(const ZeroSized: {closure@NonNull<[bool; 0]>::dangling::{closure#0}}, const NonNull::<[bool; 0]> {{ pointer: {0x1 as *const [bool; 0]} }}) -> [return: bb3, unwind continue]; } bb1: { @@ -71,11 +68,19 @@ bb2 (cleanup): { resume; } - } - - ALLOC2 (size: 16, align: 8) { .. } - - ALLOC1 (size: 16, align: 8) { .. } - ALLOC0 (size: 16, align: 8) { .. 
} + bb3: { + StorageDead(_6); + _4 = Unique::<[bool; 0]> { pointer: move _5, _marker: const PhantomData::<[bool; 0]> }; + StorageDead(_5); + _3 = move _4 as std::ptr::Unique<[bool]> (PointerCoercion(Unsize, Implicit)); + StorageDead(_4); + _2 = Box::<[bool]>(copy _3, const std::alloc::Global); + StorageDead(_3); + _1 = A { foo: move _2 }; + StorageDead(_2); + _0 = const (); + drop(_1) -> [return: bb1, unwind: bb2]; + } + } diff --git a/tests/mir-opt/dataflow-const-prop/default_boxed_slice.main.GVN.64bit.panic-unwind.diff b/tests/mir-opt/dataflow-const-prop/default_boxed_slice.main.GVN.64bit.panic-unwind.diff index 02934c02587d2..300250b78e499 100644 --- a/tests/mir-opt/dataflow-const-prop/default_boxed_slice.main.GVN.64bit.panic-unwind.diff +++ b/tests/mir-opt/dataflow-const-prop/default_boxed_slice.main.GVN.64bit.panic-unwind.diff @@ -16,23 +16,28 @@ scope 4 (inlined Unique::<[bool; 0]>::dangling) { let mut _5: std::ptr::NonNull<[bool; 0]>; scope 5 (inlined NonNull::<[bool; 0]>::dangling) { - let mut _6: std::num::NonZero; + let mut _6: std::ptr::NonNull<[bool; 0]>; + let mut _7: std::num::NonZero; scope 6 { - scope 8 (inlined std::ptr::Alignment::as_nonzero) { - } - scope 9 (inlined NonNull::<[bool; 0]>::without_provenance) { - let _7: *const [bool; 0]; - scope 10 { - } - scope 11 (inlined NonZero::::get) { + scope 7 { + scope 10 (inlined std::ptr::Alignment::as_nonzero) { } - scope 12 (inlined std::ptr::without_provenance::<[bool; 0]>) { - scope 13 (inlined without_provenance_mut::<[bool; 0]>) { + scope 11 (inlined NonNull::<[bool; 0]>::without_provenance) { + let _8: *const [bool; 0]; + scope 12 { + } + scope 13 (inlined NonZero::::get) { + } + scope 14 (inlined std::ptr::without_provenance::<[bool; 0]>) { + scope 15 (inlined without_provenance_mut::<[bool; 0]>) { + } } } } + scope 9 (inlined std::ptr::Alignment::of::<[bool; 0]>) { + } } - scope 7 (inlined std::ptr::Alignment::of::<[bool; 0]>) { + scope 8 (inlined core::contracts::build_check_ensures::, 
{closure@NonNull<[bool; 0]>::dangling::{closure#0}}>) { } } } @@ -45,29 +50,18 @@ StorageLive(_4); StorageLive(_5); StorageLive(_6); -- _6 = const std::ptr::Alignment::of::<[bool; 0]>::{constant#0} as std::num::NonZero (Transmute); -+ _6 = const NonZero::(core::num::niche_types::NonZeroUsizeInner(1_usize)); StorageLive(_7); -- _7 = copy _6 as *const [bool; 0] (Transmute); -- _5 = NonNull::<[bool; 0]> { pointer: copy _7 }; -+ _7 = const {0x1 as *const [bool; 0]}; -+ _5 = const NonNull::<[bool; 0]> {{ pointer: {0x1 as *const [bool; 0]} }}; +- _7 = const std::ptr::Alignment::of::<[bool; 0]>::{constant#0} as std::num::NonZero (Transmute); ++ _7 = const NonZero::(core::num::niche_types::NonZeroUsizeInner(1_usize)); + StorageLive(_8); +- _8 = copy _7 as *const [bool; 0] (Transmute); +- _6 = NonNull::<[bool; 0]> { pointer: copy _8 }; ++ _8 = const {0x1 as *const [bool; 0]}; ++ _6 = const NonNull::<[bool; 0]> {{ pointer: {0x1 as *const [bool; 0]} }}; + StorageDead(_8); StorageDead(_7); - StorageDead(_6); -- _4 = Unique::<[bool; 0]> { pointer: move _5, _marker: const PhantomData::<[bool; 0]> }; -+ _4 = const Unique::<[bool; 0]> {{ pointer: NonNull::<[bool; 0]> {{ pointer: {0x1 as *const [bool; 0]} }}, _marker: PhantomData::<[bool; 0]> }}; - StorageDead(_5); -- _3 = move _4 as std::ptr::Unique<[bool]> (PointerCoercion(Unsize, Implicit)); -+ _3 = const Unique::<[bool]> {{ pointer: NonNull::<[bool]> {{ pointer: Indirect { alloc_id: ALLOC0, offset: Size(0 bytes) }: *const [bool] }}, _marker: PhantomData::<[bool]> }}; - StorageDead(_4); -- _2 = Box::<[bool]>(copy _3, const std::alloc::Global); -+ _2 = const Box::<[bool]>(Unique::<[bool]> {{ pointer: NonNull::<[bool]> {{ pointer: Indirect { alloc_id: ALLOC1, offset: Size(0 bytes) }: *const [bool] }}, _marker: PhantomData::<[bool]> }}, std::alloc::Global); - StorageDead(_3); -- _1 = A { foo: move _2 }; -+ _1 = const A {{ foo: Box::<[bool]>(Unique::<[bool]> {{ pointer: NonNull::<[bool]> {{ pointer: Indirect { alloc_id: ALLOC2, 
offset: Size(0 bytes) }: *const [bool] }}, _marker: PhantomData::<[bool]> }}, std::alloc::Global) }}; - StorageDead(_2); - _0 = const (); - drop(_1) -> [return: bb1, unwind: bb2]; +- _5 = contract_check_ensures::<{closure@NonNull<[bool; 0]>::dangling::{closure#0}}, NonNull<[bool; 0]>>(const ZeroSized: {closure@NonNull<[bool; 0]>::dangling::{closure#0}}, move _6) -> [return: bb3, unwind continue]; ++ _5 = contract_check_ensures::<{closure@NonNull<[bool; 0]>::dangling::{closure#0}}, NonNull<[bool; 0]>>(const ZeroSized: {closure@NonNull<[bool; 0]>::dangling::{closure#0}}, const NonNull::<[bool; 0]> {{ pointer: {0x1 as *const [bool; 0]} }}) -> [return: bb3, unwind continue]; } bb1: { @@ -78,11 +72,19 @@ bb2 (cleanup): { resume; } + + bb3: { + StorageDead(_6); + _4 = Unique::<[bool; 0]> { pointer: move _5, _marker: const PhantomData::<[bool; 0]> }; + StorageDead(_5); + _3 = move _4 as std::ptr::Unique<[bool]> (PointerCoercion(Unsize, Implicit)); + StorageDead(_4); + _2 = Box::<[bool]>(copy _3, const std::alloc::Global); + StorageDead(_3); + _1 = A { foo: move _2 }; + StorageDead(_2); + _0 = const (); + drop(_1) -> [return: bb1, unwind: bb2]; + } } -+ -+ ALLOC2 (size: 16, align: 8) { .. } -+ -+ ALLOC1 (size: 16, align: 8) { .. } -+ -+ ALLOC0 (size: 16, align: 8) { .. 
} diff --git a/tests/mir-opt/gvn_ptr_eq_with_constant.main.GVN.diff b/tests/mir-opt/gvn_ptr_eq_with_constant.main.GVN.diff index f56af33ea603f..a5cbd3247bb22 100644 --- a/tests/mir-opt/gvn_ptr_eq_with_constant.main.GVN.diff +++ b/tests/mir-opt/gvn_ptr_eq_with_constant.main.GVN.diff @@ -6,59 +6,81 @@ let _1: bool; let mut _2: *mut u8; scope 1 (inlined dangling_mut::) { + let mut _3: std::ptr::NonNull; scope 2 (inlined NonNull::::dangling) { - let mut _3: std::num::NonZero; + let mut _4: std::ptr::NonNull; + let mut _5: std::num::NonZero; scope 3 { - scope 5 (inlined std::ptr::Alignment::as_nonzero) { - } - scope 6 (inlined NonNull::::without_provenance) { - scope 7 { - } - scope 8 (inlined NonZero::::get) { + scope 4 { + scope 7 (inlined std::ptr::Alignment::as_nonzero) { } - scope 9 (inlined std::ptr::without_provenance::) { - scope 10 (inlined without_provenance_mut::) { + scope 8 (inlined NonNull::::without_provenance) { + let _6: *const u8; + scope 9 { + } + scope 10 (inlined NonZero::::get) { + } + scope 11 (inlined std::ptr::without_provenance::) { + scope 12 (inlined without_provenance_mut::) { + } } } } + scope 6 (inlined std::ptr::Alignment::of::) { + } } - scope 4 (inlined std::ptr::Alignment::of::) { + scope 5 (inlined core::contracts::build_check_ensures::, {closure@NonNull::dangling::{closure#0}}>) { } } - scope 11 (inlined NonNull::::as_ptr) { + scope 13 (inlined NonNull::::as_ptr) { } } - scope 12 (inlined Foo::::cmp_ptr) { - let mut _4: *const u8; - let mut _5: *mut u8; - let mut _6: *const u8; - scope 13 (inlined std::ptr::eq::) { + scope 14 (inlined Foo::::cmp_ptr) { + let mut _7: *const u8; + let mut _8: *mut u8; + let mut _9: *const u8; + scope 15 (inlined std::ptr::eq::) { } } bb0: { StorageLive(_1); StorageLive(_2); - StorageLive(_3); -- _3 = const std::ptr::Alignment::of::::{constant#0} as std::num::NonZero (Transmute); -- _2 = copy _3 as *mut u8 (Transmute); -+ _3 = const NonZero::(core::num::niche_types::NonZeroUsizeInner(1_usize)); -+ _2 = 
const {0x1 as *mut u8}; - StorageDead(_3); +- StorageLive(_3); ++ nop; StorageLive(_4); StorageLive(_5); -- _5 = copy _2; -- _4 = copy _2 as *const u8 (PtrToPtr); -+ _5 = const {0x1 as *mut u8}; -+ _4 = const {0x1 as *const u8}; - StorageDead(_5); +- _5 = const std::ptr::Alignment::of::::{constant#0} as std::num::NonZero (Transmute); ++ _5 = const NonZero::(core::num::niche_types::NonZeroUsizeInner(1_usize)); StorageLive(_6); -- _6 = const Foo::::SENTINEL as *const u8 (PtrToPtr); -- _1 = Eq(copy _4, copy _6); +- _6 = copy _5 as *const u8 (Transmute); +- _4 = NonNull:: { pointer: copy _6 }; + _6 = const {0x1 as *const u8}; -+ _1 = const true; ++ _4 = const NonNull:: {{ pointer: {0x1 as *const u8} }}; StorageDead(_6); + StorageDead(_5); +- _3 = contract_check_ensures::<{closure@NonNull::dangling::{closure#0}}, NonNull>(const ZeroSized: {closure@NonNull::dangling::{closure#0}}, move _4) -> [return: bb1, unwind continue]; ++ _3 = contract_check_ensures::<{closure@NonNull::dangling::{closure#0}}, NonNull>(const ZeroSized: {closure@NonNull::dangling::{closure#0}}, const NonNull:: {{ pointer: {0x1 as *const u8} }}) -> [return: bb1, unwind continue]; + } + + bb1: { StorageDead(_4); + _2 = copy _3 as *mut u8 (Transmute); +- StorageDead(_3); ++ nop; + StorageLive(_7); + StorageLive(_8); + _8 = copy _2; +- _7 = copy _2 as *const u8 (PtrToPtr); ++ _7 = copy _3 as *const u8 (Transmute); + StorageDead(_8); + StorageLive(_9); +- _9 = const Foo::::SENTINEL as *const u8 (PtrToPtr); +- _1 = Eq(copy _7, copy _9); ++ _9 = const {0x1 as *const u8}; ++ _1 = Eq(copy _7, const {0x1 as *const u8}); + StorageDead(_9); + StorageDead(_7); StorageDead(_2); StorageDead(_1); _0 = const (); diff --git a/tests/mir-opt/inline/unchecked_shifts.unchecked_shl_unsigned_smaller.Inline.panic-unwind.diff b/tests/mir-opt/inline/unchecked_shifts.unchecked_shl_unsigned_smaller.Inline.panic-unwind.diff index 61fdb69f74b70..b7ec2363af214 100644 --- 
a/tests/mir-opt/inline/unchecked_shifts.unchecked_shl_unsigned_smaller.Inline.panic-unwind.diff +++ b/tests/mir-opt/inline/unchecked_shifts.unchecked_shl_unsigned_smaller.Inline.panic-unwind.diff @@ -9,8 +9,11 @@ let mut _4: u32; + scope 1 (inlined #[track_caller] core::num::::unchecked_shl) { + let _5: (); ++ let mut _6: {closure@core::num::::unchecked_shl::{closure#0}}; ++ let mut _7: &u32; ++ let _8: (); + scope 2 (inlined core::ub_checks::check_language_ub) { -+ let mut _6: bool; ++ let mut _9: bool; + scope 3 (inlined core::ub_checks::check_language_ub::runtime) { + } + } @@ -23,18 +26,30 @@ _4 = copy _2; - _0 = core::num::::unchecked_shl(move _3, move _4) -> [return: bb1, unwind continue]; + StorageLive(_5); ++ StorageLive(_8); ++ StorageLive(_9); + StorageLive(_6); -+ _6 = UbChecks(); -+ switchInt(copy _6) -> [0: bb2, otherwise: bb1]; ++ StorageLive(_7); ++ _7 = &_4; ++ _6 = {closure@$SRC_DIR/core/src/num/uint_macros.rs:LL:COL} { 0: copy _7 }; ++ StorageDead(_7); ++ _5 = contract_check_requires::<{closure@core::num::::unchecked_shl::{closure#0}}>(move _6) -> [return: bb1, unwind continue]; } bb1: { -+ _5 = core::num::::unchecked_shl::precondition_check(copy _4) -> [return: bb2, unwind unreachable]; ++ StorageDead(_6); ++ _9 = UbChecks(); ++ switchInt(copy _9) -> [0: bb3, otherwise: bb2]; + } + + bb2: { ++ _8 = core::num::::unchecked_shl::precondition_check(copy _4) -> [return: bb3, unwind unreachable]; ++ } ++ ++ bb3: { + _0 = ShlUnchecked(copy _3, copy _4); -+ StorageDead(_6); ++ StorageDead(_9); ++ StorageDead(_8); + StorageDead(_5); StorageDead(_4); StorageDead(_3); diff --git a/tests/mir-opt/inline/unchecked_shifts.unchecked_shl_unsigned_smaller.PreCodegen.after.panic-unwind.mir b/tests/mir-opt/inline/unchecked_shifts.unchecked_shl_unsigned_smaller.PreCodegen.after.panic-unwind.mir index 0fc7c4b7947e2..43fcb04fbb61b 100644 --- a/tests/mir-opt/inline/unchecked_shifts.unchecked_shl_unsigned_smaller.PreCodegen.after.panic-unwind.mir +++ 
b/tests/mir-opt/inline/unchecked_shifts.unchecked_shl_unsigned_smaller.PreCodegen.after.panic-unwind.mir @@ -4,7 +4,11 @@ fn unchecked_shl_unsigned_smaller(_1: u16, _2: u32) -> u16 { debug a => _1; debug b => _2; let mut _0: u16; + let mut _3: u32; scope 1 (inlined #[track_caller] core::num::::unchecked_shl) { + let mut _4: &u32; + let mut _5: {closure@core::num::::unchecked_shl::{closure#0}}; + let _6: (); scope 2 (inlined core::ub_checks::check_language_ub) { scope 3 (inlined core::ub_checks::check_language_ub::runtime) { } @@ -12,7 +16,20 @@ fn unchecked_shl_unsigned_smaller(_1: u16, _2: u32) -> u16 { } bb0: { + StorageLive(_3); + _3 = copy _2; + StorageLive(_5); + StorageLive(_4); + _4 = &_3; + _5 = {closure@$SRC_DIR/core/src/num/uint_macros.rs:LL:COL} { 0: copy _4 }; + StorageDead(_4); + _6 = contract_check_requires::<{closure@core::num::::unchecked_shl::{closure#0}}>(move _5) -> [return: bb1, unwind continue]; + } + + bb1: { + StorageDead(_5); _0 = ShlUnchecked(copy _1, copy _2); + StorageDead(_3); return; } } diff --git a/tests/mir-opt/inline/unchecked_shifts.unchecked_shr_signed_bigger.Inline.panic-unwind.diff b/tests/mir-opt/inline/unchecked_shifts.unchecked_shr_signed_bigger.Inline.panic-unwind.diff index b13531ab148f2..97cb18000a6b1 100644 --- a/tests/mir-opt/inline/unchecked_shifts.unchecked_shr_signed_bigger.Inline.panic-unwind.diff +++ b/tests/mir-opt/inline/unchecked_shifts.unchecked_shr_signed_bigger.Inline.panic-unwind.diff @@ -9,8 +9,11 @@ let mut _4: u32; + scope 1 (inlined #[track_caller] core::num::::unchecked_shr) { + let _5: (); ++ let mut _6: {closure@core::num::::unchecked_shr::{closure#0}}; ++ let mut _7: &u32; ++ let _8: (); + scope 2 (inlined core::ub_checks::check_language_ub) { -+ let mut _6: bool; ++ let mut _9: bool; + scope 3 (inlined core::ub_checks::check_language_ub::runtime) { + } + } @@ -23,18 +26,30 @@ _4 = copy _2; - _0 = core::num::::unchecked_shr(move _3, move _4) -> [return: bb1, unwind continue]; + StorageLive(_5); ++ 
StorageLive(_8); ++ StorageLive(_9); + StorageLive(_6); -+ _6 = UbChecks(); -+ switchInt(copy _6) -> [0: bb2, otherwise: bb1]; ++ StorageLive(_7); ++ _7 = &_4; ++ _6 = {closure@$SRC_DIR/core/src/num/int_macros.rs:LL:COL} { 0: copy _7 }; ++ StorageDead(_7); ++ _5 = contract_check_requires::<{closure@core::num::::unchecked_shr::{closure#0}}>(move _6) -> [return: bb1, unwind continue]; } bb1: { -+ _5 = core::num::::unchecked_shr::precondition_check(copy _4) -> [return: bb2, unwind unreachable]; ++ StorageDead(_6); ++ _9 = UbChecks(); ++ switchInt(copy _9) -> [0: bb3, otherwise: bb2]; + } + + bb2: { ++ _8 = core::num::::unchecked_shr::precondition_check(copy _4) -> [return: bb3, unwind unreachable]; ++ } ++ ++ bb3: { + _0 = ShrUnchecked(copy _3, copy _4); -+ StorageDead(_6); ++ StorageDead(_9); ++ StorageDead(_8); + StorageDead(_5); StorageDead(_4); StorageDead(_3); diff --git a/tests/mir-opt/inline/unchecked_shifts.unchecked_shr_signed_bigger.PreCodegen.after.panic-unwind.mir b/tests/mir-opt/inline/unchecked_shifts.unchecked_shr_signed_bigger.PreCodegen.after.panic-unwind.mir index bef7fa7b1df75..af54b54c0fb6c 100644 --- a/tests/mir-opt/inline/unchecked_shifts.unchecked_shr_signed_bigger.PreCodegen.after.panic-unwind.mir +++ b/tests/mir-opt/inline/unchecked_shifts.unchecked_shr_signed_bigger.PreCodegen.after.panic-unwind.mir @@ -4,7 +4,11 @@ fn unchecked_shr_signed_bigger(_1: i64, _2: u32) -> i64 { debug a => _1; debug b => _2; let mut _0: i64; + let mut _3: u32; scope 1 (inlined #[track_caller] core::num::::unchecked_shr) { + let mut _4: &u32; + let mut _5: {closure@core::num::::unchecked_shr::{closure#0}}; + let _6: (); scope 2 (inlined core::ub_checks::check_language_ub) { scope 3 (inlined core::ub_checks::check_language_ub::runtime) { } @@ -12,7 +16,20 @@ fn unchecked_shr_signed_bigger(_1: i64, _2: u32) -> i64 { } bb0: { + StorageLive(_3); + _3 = copy _2; + StorageLive(_5); + StorageLive(_4); + _4 = &_3; + _5 = {closure@$SRC_DIR/core/src/num/int_macros.rs:LL:COL} 
{ 0: copy _4 }; + StorageDead(_4); + _6 = contract_check_requires::<{closure@core::num::::unchecked_shr::{closure#0}}>(move _5) -> [return: bb1, unwind continue]; + } + + bb1: { + StorageDead(_5); _0 = ShrUnchecked(copy _1, copy _2); + StorageDead(_3); return; } } diff --git a/tests/mir-opt/pre-codegen/checked_ops.checked_shl.PreCodegen.after.panic-unwind.mir b/tests/mir-opt/pre-codegen/checked_ops.checked_shl.PreCodegen.after.panic-unwind.mir index 18eeb8e4d3b61..1f6f93f08763a 100644 --- a/tests/mir-opt/pre-codegen/checked_ops.checked_shl.PreCodegen.after.panic-unwind.mir +++ b/tests/mir-opt/pre-codegen/checked_ops.checked_shl.PreCodegen.after.panic-unwind.mir @@ -7,7 +7,11 @@ fn checked_shl(_1: u32, _2: u32) -> Option { scope 1 (inlined core::num::::checked_shl) { let mut _3: bool; let mut _4: u32; + let mut _8: u32; scope 2 (inlined #[track_caller] core::num::::unchecked_shl) { + let mut _5: &u32; + let mut _6: {closure@core::num::::unchecked_shl::{closure#0}}; + let _7: (); scope 3 (inlined core::ub_checks::check_language_ub) { scope 4 (inlined core::ub_checks::check_language_ub::runtime) { } @@ -23,18 +27,31 @@ fn checked_shl(_1: u32, _2: u32) -> Option { bb1: { _0 = const Option::::None; - goto -> bb3; + goto -> bb4; } bb2: { + StorageLive(_8); StorageLive(_4); - _4 = ShlUnchecked(copy _1, copy _2); - _0 = Option::::Some(move _4); - StorageDead(_4); - goto -> bb3; + _4 = copy _2; + StorageLive(_6); + StorageLive(_5); + _5 = &_4; + _6 = {closure@$SRC_DIR/core/src/num/uint_macros.rs:LL:COL} { 0: copy _5 }; + StorageDead(_5); + _7 = contract_check_requires::<{closure@core::num::::unchecked_shl::{closure#0}}>(move _6) -> [return: bb3, unwind continue]; } bb3: { + StorageDead(_6); + _8 = ShlUnchecked(copy _1, copy _2); + StorageDead(_4); + _0 = Option::::Some(move _8); + StorageDead(_8); + goto -> bb4; + } + + bb4: { StorageDead(_3); return; } diff --git a/tests/mir-opt/pre-codegen/drop_boxed_slice.generic_in_place.PreCodegen.after.64bit.panic-unwind.mir 
b/tests/mir-opt/pre-codegen/drop_boxed_slice.generic_in_place.PreCodegen.after.64bit.panic-unwind.mir index ba6ce0ee5286f..31e3c221af7f4 100644 --- a/tests/mir-opt/pre-codegen/drop_boxed_slice.generic_in_place.PreCodegen.after.64bit.panic-unwind.mir +++ b/tests/mir-opt/pre-codegen/drop_boxed_slice.generic_in_place.PreCodegen.after.64bit.panic-unwind.mir @@ -4,100 +4,17 @@ fn generic_in_place(_1: *mut Box<[T]>) -> () { debug ptr => _1; let mut _0: (); scope 1 (inlined drop_in_place::> - shim(Some(Box<[T]>))) { - scope 2 (inlined as Drop>::drop) { - let _2: std::ptr::NonNull<[T]>; - let mut _3: *mut [T]; - let mut _4: *const [T]; - let _11: (); - scope 3 { - let _8: std::ptr::alignment::AlignmentEnum; - scope 4 { - scope 12 (inlined Layout::size) { - } - scope 13 (inlined Unique::<[T]>::cast::) { - scope 14 (inlined NonNull::<[T]>::cast::) { - scope 15 (inlined NonNull::<[T]>::as_ptr) { - } - } - } - scope 16 (inlined as From>>::from) { - scope 17 (inlined Unique::::as_non_null_ptr) { - } - } - scope 18 (inlined ::deallocate) { - let mut _9: *mut u8; - scope 19 (inlined Layout::size) { - } - scope 20 (inlined NonNull::::as_ptr) { - } - scope 21 (inlined std::alloc::dealloc) { - let mut _10: usize; - scope 22 (inlined Layout::size) { - } - scope 23 (inlined Layout::align) { - scope 24 (inlined std::ptr::Alignment::as_usize) { - } - } - } - } - } - scope 5 (inlined Unique::<[T]>::as_ptr) { - scope 6 (inlined NonNull::<[T]>::as_ptr) { - } - } - scope 7 (inlined Layout::for_value_raw::<[T]>) { - let mut _5: usize; - let mut _6: usize; - scope 8 { - scope 11 (inlined #[track_caller] Layout::from_size_align_unchecked) { - let mut _7: std::ptr::Alignment; - } - } - scope 9 (inlined size_of_val_raw::<[T]>) { - } - scope 10 (inlined align_of_val_raw::<[T]>) { - } - } - } - } + let mut _2: &mut std::boxed::Box<[T]>; + let mut _3: (); } bb0: { StorageLive(_2); - _2 = copy (((*_1).0: std::ptr::Unique<[T]>).0: std::ptr::NonNull<[T]>); - StorageLive(_4); - _3 = copy _2 as *mut [T] 
(Transmute); - _4 = copy _2 as *const [T] (Transmute); - StorageLive(_6); - _5 = std::intrinsics::size_of_val::<[T]>(move _4) -> [return: bb1, unwind unreachable]; + _2 = &mut (*_1); + _3 = as Drop>::drop(move _2) -> [return: bb1, unwind continue]; } bb1: { - _6 = AlignOf(T); - StorageLive(_7); - _7 = copy _6 as std::ptr::Alignment (Transmute); - _8 = move (_7.0: std::ptr::alignment::AlignmentEnum); - StorageDead(_7); - StorageDead(_6); - StorageDead(_4); - switchInt(copy _5) -> [0: bb4, otherwise: bb2]; - } - - bb2: { - StorageLive(_9); - _9 = copy _3 as *mut u8 (PtrToPtr); - StorageLive(_10); - _10 = discriminant(_8); - _11 = alloc::alloc::__rust_dealloc(move _9, move _5, move _10) -> [return: bb3, unwind unreachable]; - } - - bb3: { - StorageDead(_10); - StorageDead(_9); - goto -> bb4; - } - - bb4: { StorageDead(_2); return; } diff --git a/tests/mir-opt/pre-codegen/loops.int_range.PreCodegen.after.mir b/tests/mir-opt/pre-codegen/loops.int_range.PreCodegen.after.mir index 154cbd3791cbd..2f7c6dee027e1 100644 --- a/tests/mir-opt/pre-codegen/loops.int_range.PreCodegen.after.mir +++ b/tests/mir-opt/pre-codegen/loops.int_range.PreCodegen.after.mir @@ -7,13 +7,15 @@ fn int_range(_1: usize, _2: usize) -> () { let mut _3: std::ops::Range; let mut _4: std::ops::Range; let mut _5: &mut std::ops::Range; - let mut _13: std::option::Option; - let _15: (); + let mut _12: usize; + let mut _13: usize; + let mut _25: std::option::Option; + let _27: (); scope 1 { debug iter => _4; - let _14: usize; + let _26: usize; scope 2 { - debug i => _14; + debug i => _26; } scope 4 (inlined iter::range::>::next) { debug self => _5; @@ -23,15 +25,25 @@ fn int_range(_1: usize, _2: usize) -> () { let mut _7: &usize; let mut _10: bool; let _11: usize; - let mut _12: usize; + let mut _24: usize; scope 6 { debug old => _11; scope 8 (inlined ::forward_unchecked) { - debug start => _11; - debug n => const 1_usize; + debug start => _12; + debug n => _13; + let mut _14: &usize; + let mut _15: &usize; 
+ let mut _16: {closure@::forward_unchecked::{closure#0}}; + let _17: (); + let mut _18: usize; + let mut _19: usize; scope 9 (inlined #[track_caller] core::num::::unchecked_add) { - debug self => _11; - debug rhs => const 1_usize; + debug self => _18; + debug rhs => _19; + let mut _20: &usize; + let mut _21: &usize; + let mut _22: {closure@core::num::::unchecked_add::{closure#0}}; + let _23: (); scope 10 (inlined core::ub_checks::check_language_ub) { scope 11 (inlined core::ub_checks::check_language_ub::runtime) { } @@ -60,7 +72,7 @@ fn int_range(_1: usize, _2: usize) -> () { } bb1: { - StorageLive(_13); + StorageLive(_25); _5 = &mut _4; StorageLive(_10); StorageLive(_6); @@ -81,7 +93,7 @@ fn int_range(_1: usize, _2: usize) -> () { StorageDead(_7); StorageDead(_6); StorageDead(_10); - StorageDead(_13); + StorageDead(_25); StorageDead(_4); return; } @@ -90,18 +102,56 @@ fn int_range(_1: usize, _2: usize) -> () { StorageDead(_7); StorageDead(_6); _11 = copy (_4.0: usize); + StorageLive(_24); StorageLive(_12); - _12 = AddUnchecked(copy _11, const 1_usize); - (_4.0: usize) = move _12; - StorageDead(_12); - _13 = Option::::Some(copy _11); - StorageDead(_10); - _14 = copy ((_13 as Some).0: usize); - _15 = opaque::(move _14) -> [return: bb4, unwind continue]; + _12 = copy _11; + StorageLive(_13); + _13 = const 1_usize; + StorageLive(_16); + StorageLive(_14); + _14 = &_12; + StorageLive(_15); + _15 = &_13; + _16 = {closure@$SRC_DIR/core/src/iter/range.rs:LL:COL} { 0: copy _14, 1: copy _15 }; + StorageDead(_15); + StorageDead(_14); + _17 = contract_check_requires::<{closure@::forward_unchecked::{closure#0}}>(move _16) -> [return: bb4, unwind continue]; } bb4: { + StorageDead(_16); + StorageLive(_18); + _18 = copy _11; + StorageLive(_19); + _19 = const 1_usize; + StorageLive(_22); + StorageLive(_20); + _20 = &_18; + StorageLive(_21); + _21 = &_19; + _22 = {closure@$SRC_DIR/core/src/num/uint_macros.rs:LL:COL} { 0: copy _20, 1: copy _21 }; + StorageDead(_21); + 
StorageDead(_20); + _23 = contract_check_requires::<{closure@core::num::::unchecked_add::{closure#0}}>(move _22) -> [return: bb5, unwind continue]; + } + + bb5: { + StorageDead(_22); + _24 = AddUnchecked(copy _11, const 1_usize); + StorageDead(_19); + StorageDead(_18); StorageDead(_13); + StorageDead(_12); + (_4.0: usize) = move _24; + StorageDead(_24); + _25 = Option::::Some(copy _11); + StorageDead(_10); + _26 = copy ((_25 as Some).0: usize); + _27 = opaque::(move _26) -> [return: bb6, unwind continue]; + } + + bb6: { + StorageDead(_25); goto -> bb1; } } diff --git a/tests/mir-opt/pre-codegen/ptr_offset.demo_byte_add_fat.PreCodegen.after.panic-unwind.mir b/tests/mir-opt/pre-codegen/ptr_offset.demo_byte_add_fat.PreCodegen.after.panic-unwind.mir index a6dad00bbdb19..f7f39d172d68d 100644 --- a/tests/mir-opt/pre-codegen/ptr_offset.demo_byte_add_fat.PreCodegen.after.panic-unwind.mir +++ b/tests/mir-opt/pre-codegen/ptr_offset.demo_byte_add_fat.PreCodegen.after.panic-unwind.mir @@ -6,13 +6,18 @@ fn demo_byte_add_fat(_1: *const [u32], _2: usize) -> *const [u32] { let mut _0: *const [u32]; scope 1 (inlined #[track_caller] std::ptr::const_ptr::::byte_add) { let mut _3: *const u8; - let mut _4: *const u8; + let mut _4: usize; + let mut _9: *const u8; scope 2 (inlined std::ptr::const_ptr::::cast::) { } scope 3 (inlined #[track_caller] std::ptr::const_ptr::::add) { + let mut _5: &usize; + let mut _6: &*const u8; + let mut _7: {closure@std::ptr::const_ptr::::add::{closure#0}}; + let _8: (); } scope 4 (inlined std::ptr::const_ptr::::with_metadata_of::<[u32]>) { - let mut _5: usize; + let mut _10: usize; scope 5 (inlined std::ptr::metadata::<[u32]>) { } scope 6 (inlined std::ptr::from_raw_parts::<[u32], ()>) { @@ -21,16 +26,32 @@ fn demo_byte_add_fat(_1: *const [u32], _2: usize) -> *const [u32] { } bb0: { - StorageLive(_4); + StorageLive(_9); StorageLive(_3); _3 = copy _1 as *const u8 (PtrToPtr); - _4 = Offset(copy _3, copy _2); - StorageDead(_3); + StorageLive(_4); + _4 = copy 
_2; + StorageLive(_7); StorageLive(_5); - _5 = PtrMetadata(copy _1); - _0 = *const [u32] from (copy _4, copy _5); + _5 = &_4; + StorageLive(_6); + _6 = &_3; + _7 = {closure@$SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL} { 0: copy _5, 1: copy _6 }; + StorageDead(_6); StorageDead(_5); + _8 = contract_check_requires::<{closure@std::ptr::const_ptr::::add::{closure#0}}>(move _7) -> [return: bb1, unwind continue]; + } + + bb1: { + StorageDead(_7); + _9 = Offset(copy _3, copy _2); StorageDead(_4); + StorageDead(_3); + StorageLive(_10); + _10 = PtrMetadata(copy _1); + _0 = *const [u32] from (copy _9, copy _10); + StorageDead(_10); + StorageDead(_9); return; } } diff --git a/tests/mir-opt/pre-codegen/ptr_offset.demo_byte_add_thin.PreCodegen.after.panic-unwind.mir b/tests/mir-opt/pre-codegen/ptr_offset.demo_byte_add_thin.PreCodegen.after.panic-unwind.mir index cb7f15657463b..efccb158c22b8 100644 --- a/tests/mir-opt/pre-codegen/ptr_offset.demo_byte_add_thin.PreCodegen.after.panic-unwind.mir +++ b/tests/mir-opt/pre-codegen/ptr_offset.demo_byte_add_thin.PreCodegen.after.panic-unwind.mir @@ -6,10 +6,15 @@ fn demo_byte_add_thin(_1: *const u32, _2: usize) -> *const u32 { let mut _0: *const u32; scope 1 (inlined #[track_caller] std::ptr::const_ptr::::byte_add) { let mut _3: *const u8; - let mut _4: *const u8; + let mut _4: usize; + let mut _9: *const u8; scope 2 (inlined std::ptr::const_ptr::::cast::) { } scope 3 (inlined #[track_caller] std::ptr::const_ptr::::add) { + let mut _5: &usize; + let mut _6: &*const u8; + let mut _7: {closure@std::ptr::const_ptr::::add::{closure#0}}; + let _8: (); } scope 4 (inlined std::ptr::const_ptr::::with_metadata_of::) { scope 5 (inlined std::ptr::metadata::) { @@ -20,13 +25,29 @@ fn demo_byte_add_thin(_1: *const u32, _2: usize) -> *const u32 { } bb0: { - StorageLive(_4); + StorageLive(_9); StorageLive(_3); _3 = copy _1 as *const u8 (PtrToPtr); - _4 = Offset(copy _3, copy _2); - StorageDead(_3); - _0 = copy _4 as *const u32 (PtrToPtr); + 
StorageLive(_4); + _4 = copy _2; + StorageLive(_7); + StorageLive(_5); + _5 = &_4; + StorageLive(_6); + _6 = &_3; + _7 = {closure@$SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL} { 0: copy _5, 1: copy _6 }; + StorageDead(_6); + StorageDead(_5); + _8 = contract_check_requires::<{closure@std::ptr::const_ptr::::add::{closure#0}}>(move _7) -> [return: bb1, unwind continue]; + } + + bb1: { + StorageDead(_7); + _9 = Offset(copy _3, copy _2); StorageDead(_4); + StorageDead(_3); + _0 = copy _9 as *const u32 (PtrToPtr); + StorageDead(_9); return; } } diff --git a/tests/mir-opt/pre-codegen/range_iter.forward_loop.PreCodegen.after.panic-unwind.mir b/tests/mir-opt/pre-codegen/range_iter.forward_loop.PreCodegen.after.panic-unwind.mir index 3b09f33e73338..27a65db7ce27f 100644 --- a/tests/mir-opt/pre-codegen/range_iter.forward_loop.PreCodegen.after.panic-unwind.mir +++ b/tests/mir-opt/pre-codegen/range_iter.forward_loop.PreCodegen.after.panic-unwind.mir @@ -5,16 +5,18 @@ fn forward_loop(_1: u32, _2: u32, _3: impl Fn(u32)) -> () { debug end => _2; debug f => _3; let mut _0: (); - let mut _7: std::option::Option; - let mut _9: &impl Fn(u32); - let mut _10: (u32,); - let _11: (); + let mut _7: u32; + let mut _8: usize; + let mut _19: std::option::Option; + let mut _21: &impl Fn(u32); + let mut _22: (u32,); + let _23: (); scope 1 { debug ((iter: std::ops::Range).0: u32) => _1; debug ((iter: std::ops::Range).1: u32) => _2; - let _8: u32; + let _20: u32; scope 2 { - debug x => _8; + debug x => _20; } scope 4 (inlined iter::range::>::next) { scope 5 (inlined as iter::range::RangeIteratorImpl>::spec_next) { @@ -22,7 +24,17 @@ fn forward_loop(_1: u32, _2: u32, _3: impl Fn(u32)) -> () { let _6: u32; scope 6 { scope 8 (inlined ::forward_unchecked) { + let mut _9: &u32; + let mut _10: &usize; + let mut _11: {closure@::forward_unchecked::{closure#0}}; + let _12: (); + let mut _13: u32; + let mut _14: u32; scope 9 (inlined #[track_caller] core::num::::unchecked_add) { + let mut _15: &u32; + let mut 
_16: &u32; + let mut _17: {closure@core::num::::unchecked_add::{closure#0}}; + let _18: (); scope 10 (inlined core::ub_checks::check_language_ub) { scope 11 (inlined core::ub_checks::check_language_ub::runtime) { } @@ -44,7 +56,7 @@ fn forward_loop(_1: u32, _2: u32, _3: impl Fn(u32)) -> () { } bb1: { - StorageLive(_7); + StorageLive(_19); StorageLive(_5); StorageLive(_4); _4 = copy _1; @@ -55,7 +67,7 @@ fn forward_loop(_1: u32, _2: u32, _3: impl Fn(u32)) -> () { bb2: { StorageDead(_5); - StorageDead(_7); + StorageDead(_19); drop(_3) -> [return: bb3, unwind continue]; } @@ -65,29 +77,67 @@ fn forward_loop(_1: u32, _2: u32, _3: impl Fn(u32)) -> () { bb4: { _6 = copy _1; - _1 = AddUnchecked(copy _6, const 1_u32); - _7 = Option::::Some(copy _6); - StorageDead(_5); - _8 = copy ((_7 as Some).0: u32); + StorageLive(_7); + _7 = copy _6; + StorageLive(_8); + _8 = const 1_usize; + StorageLive(_11); StorageLive(_9); - _9 = &_3; + _9 = &_7; StorageLive(_10); - _10 = (copy _8,); - _11 = >::call(move _9, move _10) -> [return: bb5, unwind: bb6]; + _10 = &_8; + _11 = {closure@$SRC_DIR/core/src/iter/range.rs:LL:COL} { 0: copy _9, 1: copy _10 }; + StorageDead(_10); + StorageDead(_9); + _12 = contract_check_requires::<{closure@::forward_unchecked::{closure#0}}>(move _11) -> [return: bb5, unwind: bb8]; } bb5: { - StorageDead(_10); - StorageDead(_9); + StorageDead(_11); + StorageLive(_13); + _13 = copy _6; + StorageLive(_14); + _14 = const 1_u32; + StorageLive(_17); + StorageLive(_15); + _15 = &_13; + StorageLive(_16); + _16 = &_14; + _17 = {closure@$SRC_DIR/core/src/num/uint_macros.rs:LL:COL} { 0: copy _15, 1: copy _16 }; + StorageDead(_16); + StorageDead(_15); + _18 = contract_check_requires::<{closure@core::num::::unchecked_add::{closure#0}}>(move _17) -> [return: bb6, unwind: bb8]; + } + + bb6: { + StorageDead(_17); + _1 = AddUnchecked(copy _6, const 1_u32); + StorageDead(_14); + StorageDead(_13); + StorageDead(_8); StorageDead(_7); + _19 = Option::::Some(copy _6); + 
StorageDead(_5); + _20 = copy ((_19 as Some).0: u32); + StorageLive(_21); + _21 = &_3; + StorageLive(_22); + _22 = (copy _20,); + _23 = >::call(move _21, move _22) -> [return: bb7, unwind: bb8]; + } + + bb7: { + StorageDead(_22); + StorageDead(_21); + StorageDead(_19); goto -> bb1; } - bb6 (cleanup): { - drop(_3) -> [return: bb7, unwind terminate(cleanup)]; + bb8 (cleanup): { + drop(_3) -> [return: bb9, unwind terminate(cleanup)]; } - bb7 (cleanup): { + bb9 (cleanup): { resume; } } diff --git a/tests/mir-opt/pre-codegen/range_iter.range_iter_next.PreCodegen.after.panic-unwind.mir b/tests/mir-opt/pre-codegen/range_iter.range_iter_next.PreCodegen.after.panic-unwind.mir index 1f82fc59ac2c1..6ec1b48651802 100644 --- a/tests/mir-opt/pre-codegen/range_iter.range_iter_next.PreCodegen.after.panic-unwind.mir +++ b/tests/mir-opt/pre-codegen/range_iter.range_iter_next.PreCodegen.after.panic-unwind.mir @@ -3,14 +3,26 @@ fn range_iter_next(_1: &mut std::ops::Range) -> Option { debug it => _1; let mut _0: std::option::Option; + let mut _6: u32; + let mut _7: usize; scope 1 (inlined iter::range::>::next) { scope 2 (inlined as iter::range::RangeIteratorImpl>::spec_next) { let mut _4: bool; let _5: u32; - let mut _6: u32; + let mut _18: u32; scope 3 { scope 5 (inlined ::forward_unchecked) { + let mut _8: &u32; + let mut _9: &usize; + let mut _10: {closure@::forward_unchecked::{closure#0}}; + let _11: (); + let mut _12: u32; + let mut _13: u32; scope 6 (inlined #[track_caller] core::num::::unchecked_add) { + let mut _14: &u32; + let mut _15: &u32; + let mut _16: {closure@core::num::::unchecked_add::{closure#0}}; + let _17: (); scope 7 (inlined core::ub_checks::check_language_ub) { scope 8 (inlined core::ub_checks::check_language_ub::runtime) { } @@ -39,20 +51,58 @@ fn range_iter_next(_1: &mut std::ops::Range) -> Option { bb1: { _0 = const Option::::None; - goto -> bb3; + goto -> bb5; } bb2: { _5 = copy ((*_1).0: u32); + StorageLive(_18); StorageLive(_6); - _6 = AddUnchecked(copy _5, 
const 1_u32); - ((*_1).0: u32) = move _6; + _6 = copy _5; + StorageLive(_7); + _7 = const 1_usize; + StorageLive(_10); + StorageLive(_8); + _8 = &_6; + StorageLive(_9); + _9 = &_7; + _10 = {closure@$SRC_DIR/core/src/iter/range.rs:LL:COL} { 0: copy _8, 1: copy _9 }; + StorageDead(_9); + StorageDead(_8); + _11 = contract_check_requires::<{closure@::forward_unchecked::{closure#0}}>(move _10) -> [return: bb3, unwind continue]; + } + + bb3: { + StorageDead(_10); + StorageLive(_12); + _12 = copy _5; + StorageLive(_13); + _13 = const 1_u32; + StorageLive(_16); + StorageLive(_14); + _14 = &_12; + StorageLive(_15); + _15 = &_13; + _16 = {closure@$SRC_DIR/core/src/num/uint_macros.rs:LL:COL} { 0: copy _14, 1: copy _15 }; + StorageDead(_15); + StorageDead(_14); + _17 = contract_check_requires::<{closure@core::num::::unchecked_add::{closure#0}}>(move _16) -> [return: bb4, unwind continue]; + } + + bb4: { + StorageDead(_16); + _18 = AddUnchecked(copy _5, const 1_u32); + StorageDead(_13); + StorageDead(_12); + StorageDead(_7); StorageDead(_6); + ((*_1).0: u32) = move _18; + StorageDead(_18); _0 = Option::::Some(copy _5); - goto -> bb3; + goto -> bb5; } - bb3: { + bb5: { StorageDead(_4); return; } diff --git a/tests/mir-opt/pre-codegen/slice_iter.enumerated_loop.PreCodegen.after.panic-unwind.mir b/tests/mir-opt/pre-codegen/slice_iter.enumerated_loop.PreCodegen.after.panic-unwind.mir index 28b12cdf36755..9459c0a6d166b 100644 --- a/tests/mir-opt/pre-codegen/slice_iter.enumerated_loop.PreCodegen.after.panic-unwind.mir +++ b/tests/mir-opt/pre-codegen/slice_iter.enumerated_loop.PreCodegen.after.panic-unwind.mir @@ -4,33 +4,34 @@ fn enumerated_loop(_1: &[T], _2: impl Fn(usize, &T)) -> () { debug slice => _1; debug f => _2; let mut _0: (); - let mut _10: std::slice::Iter<'_, T>; - let mut _11: std::iter::Enumerate>; - let mut _12: std::iter::Enumerate>; - let mut _13: &mut std::iter::Enumerate>; - let mut _14: std::option::Option<(usize, &T)>; - let mut _15: isize; - let mut _18: &impl 
Fn(usize, &T); - let mut _19: (usize, &T); - let _20: (); + let mut _15: std::slice::Iter<'_, T>; + let mut _16: std::iter::Enumerate>; + let mut _17: std::iter::Enumerate>; + let mut _18: &mut std::iter::Enumerate>; + let mut _19: std::option::Option<(usize, &T)>; + let mut _20: isize; + let mut _23: &impl Fn(usize, &T); + let mut _24: (usize, &T); + let _25: (); scope 1 { - debug iter => _12; - let _16: usize; - let _17: &T; + debug iter => _17; + let _21: usize; + let _22: &T; scope 2 { - debug i => _16; - debug x => _17; + debug i => _21; + debug x => _22; } } scope 3 (inlined core::slice::::iter) { scope 4 (inlined std::slice::Iter::<'_, T>::new) { let _3: usize; let mut _7: *mut T; - let mut _8: *mut T; + let mut _8: usize; + let mut _13: *mut T; scope 5 { let _6: std::ptr::NonNull; scope 6 { - let _9: *const T; + let _14: *const T; scope 7 { } scope 11 (inlined std::ptr::without_provenance::) { @@ -40,6 +41,10 @@ fn enumerated_loop(_1: &[T], _2: impl Fn(usize, &T)) -> () { scope 13 (inlined NonNull::::as_ptr) { } scope 14 (inlined #[track_caller] std::ptr::mut_ptr::::add) { + let mut _9: &usize; + let mut _10: &*mut T; + let mut _11: {closure@std::ptr::mut_ptr::::add::{closure#0}}; + let _12: (); } } scope 8 (inlined NonNull::<[T]>::from_ref) { @@ -61,10 +66,10 @@ fn enumerated_loop(_1: &[T], _2: impl Fn(usize, &T)) -> () { } bb0: { - StorageLive(_10); + StorageLive(_15); StorageLive(_3); StorageLive(_6); - StorageLive(_9); + StorageLive(_14); StorageLive(_4); _3 = PtrMetadata(copy _1); _4 = &raw const (*_1); @@ -72,82 +77,98 @@ fn enumerated_loop(_1: &[T], _2: impl Fn(usize, &T)) -> () { _5 = copy _4 as *const T (PtrToPtr); _6 = NonNull:: { pointer: copy _5 }; StorageDead(_5); - switchInt(const ::IS_ZST) -> [0: bb1, otherwise: bb2]; + switchInt(const ::IS_ZST) -> [0: bb1, otherwise: bb3]; } bb1: { - StorageLive(_8); + StorageLive(_13); StorageLive(_7); _7 = copy _4 as *mut T (PtrToPtr); - _8 = Offset(copy _7, copy _3); - StorageDead(_7); - _9 = copy _8 as 
*const T (PtrToPtr); - StorageDead(_8); - goto -> bb3; + StorageLive(_8); + _8 = copy _3; + StorageLive(_11); + StorageLive(_9); + _9 = &_8; + StorageLive(_10); + _10 = &_7; + _11 = {closure@$SRC_DIR/core/src/ptr/mut_ptr.rs:LL:COL} { 0: copy _9, 1: copy _10 }; + StorageDead(_10); + StorageDead(_9); + _12 = contract_check_requires::<{closure@std::ptr::mut_ptr::::add::{closure#0}}>(move _11) -> [return: bb2, unwind: bb12]; } bb2: { - _9 = copy _3 as *const T (Transmute); - goto -> bb3; + StorageDead(_11); + _13 = Offset(copy _7, copy _3); + StorageDead(_8); + StorageDead(_7); + _14 = copy _13 as *const T (PtrToPtr); + StorageDead(_13); + goto -> bb4; } bb3: { - _10 = std::slice::Iter::<'_, T> { ptr: copy _6, end_or_len: copy _9, _marker: const ZeroSized: PhantomData<&T> }; - StorageDead(_4); - StorageDead(_9); - StorageDead(_6); - StorageDead(_3); - _11 = Enumerate::> { iter: copy _10, count: const 0_usize }; - StorageDead(_10); - StorageLive(_12); - _12 = copy _11; + _14 = copy _3 as *const T (Transmute); goto -> bb4; } bb4: { - _13 = &mut _12; - _14 = > as Iterator>::next(move _13) -> [return: bb5, unwind: bb11]; + _15 = std::slice::Iter::<'_, T> { ptr: copy _6, end_or_len: copy _14, _marker: const ZeroSized: PhantomData<&T> }; + StorageDead(_4); + StorageDead(_14); + StorageDead(_6); + StorageDead(_3); + _16 = Enumerate::> { iter: copy _15, count: const 0_usize }; + StorageDead(_15); + StorageLive(_17); + _17 = copy _16; + goto -> bb5; } bb5: { - _15 = discriminant(_14); - switchInt(move _15) -> [0: bb6, 1: bb8, otherwise: bb10]; + _18 = &mut _17; + _19 = > as Iterator>::next(move _18) -> [return: bb6, unwind: bb12]; } bb6: { - StorageDead(_12); - drop(_2) -> [return: bb7, unwind continue]; + _20 = discriminant(_19); + switchInt(move _20) -> [0: bb7, 1: bb9, otherwise: bb11]; } bb7: { - return; + StorageDead(_17); + drop(_2) -> [return: bb8, unwind continue]; } bb8: { - _16 = copy (((_14 as Some).0: (usize, &T)).0: usize); - _17 = copy (((_14 as Some).0: (usize, 
&T)).1: &T); - StorageLive(_18); - _18 = &_2; - StorageLive(_19); - _19 = copy ((_14 as Some).0: (usize, &T)); - _20 = >::call(move _18, move _19) -> [return: bb9, unwind: bb11]; + return; } bb9: { - StorageDead(_19); - StorageDead(_18); - goto -> bb4; + _21 = copy (((_19 as Some).0: (usize, &T)).0: usize); + _22 = copy (((_19 as Some).0: (usize, &T)).1: &T); + StorageLive(_23); + _23 = &_2; + StorageLive(_24); + _24 = copy ((_19 as Some).0: (usize, &T)); + _25 = >::call(move _23, move _24) -> [return: bb10, unwind: bb12]; } bb10: { - unreachable; + StorageDead(_24); + StorageDead(_23); + goto -> bb5; } - bb11 (cleanup): { - drop(_2) -> [return: bb12, unwind terminate(cleanup)]; + bb11: { + unreachable; } bb12 (cleanup): { + drop(_2) -> [return: bb13, unwind terminate(cleanup)]; + } + + bb13 (cleanup): { resume; } } diff --git a/tests/mir-opt/pre-codegen/slice_iter.forward_loop.PreCodegen.after.panic-unwind.mir b/tests/mir-opt/pre-codegen/slice_iter.forward_loop.PreCodegen.after.panic-unwind.mir index 2b5d8c27d7109..634422fc93ed0 100644 --- a/tests/mir-opt/pre-codegen/slice_iter.forward_loop.PreCodegen.after.panic-unwind.mir +++ b/tests/mir-opt/pre-codegen/slice_iter.forward_loop.PreCodegen.after.panic-unwind.mir @@ -4,76 +4,31 @@ fn forward_loop(_1: &[T], _2: impl Fn(&T)) -> () { debug slice => _1; debug f => _2; let mut _0: (); - let mut _22: std::option::Option<&T>; - let mut _24: &impl Fn(&T); - let mut _25: (&T,); - let _26: (); + let mut _15: std::slice::Iter<'_, T>; + let mut _16: std::slice::Iter<'_, T>; + let mut _17: &mut std::slice::Iter<'_, T>; + let mut _18: std::option::Option<&T>; + let mut _19: isize; + let mut _21: &impl Fn(&T); + let mut _22: (&T,); + let _23: (); scope 1 { - debug ((iter: std::slice::Iter<'_, T>).0: std::ptr::NonNull) => _6; - debug ((iter: std::slice::Iter<'_, T>).1: *const T) => _9; - debug ((iter: std::slice::Iter<'_, T>).2: std::marker::PhantomData<&T>) => const ZeroSized: PhantomData<&T>; - let _23: &T; + debug iter => _16; 
+ let _20: &T; scope 2 { - debug x => _23; - } - scope 16 (inlined as Iterator>::next) { - let mut _6: std::ptr::NonNull; - let _10: std::ptr::NonNull; - let _12: std::ptr::NonNull; - let mut _15: bool; - let mut _19: usize; - let _21: &T; - scope 17 { - let _11: *const T; - scope 18 { - let _18: usize; - scope 19 { - scope 22 (inlined #[track_caller] core::num::::unchecked_sub) { - scope 23 (inlined core::ub_checks::check_language_ub) { - scope 24 (inlined core::ub_checks::check_language_ub::runtime) { - } - } - } - scope 25 (inlined without_provenance_mut::) { - } - } - scope 20 (inlined std::ptr::const_ptr::::addr) { - scope 21 (inlined std::ptr::const_ptr::::cast::<()>) { - } - } - scope 26 (inlined as PartialEq>::eq) { - let mut _13: *mut T; - let mut _14: *mut T; - scope 27 (inlined NonNull::::as_ptr) { - } - scope 28 (inlined NonNull::::as_ptr) { - } - } - scope 29 (inlined NonNull::::add) { - let mut _16: *const T; - let mut _17: *const T; - scope 30 (inlined NonNull::::as_ptr) { - } - } - scope 31 (inlined NonNull::::as_ref::<'_>) { - let _20: *const T; - scope 32 (inlined NonNull::::as_ptr) { - } - scope 33 (inlined std::ptr::mut_ptr::::cast_const) { - } - } - } - } + debug x => _20; } } scope 3 (inlined core::slice::::iter) { scope 4 (inlined std::slice::Iter::<'_, T>::new) { let _3: usize; let mut _7: *mut T; - let mut _8: *mut T; + let mut _8: usize; + let mut _13: *mut T; scope 5 { + let _6: std::ptr::NonNull; scope 6 { - let _9: *const T; + let _14: *const T; scope 7 { } scope 11 (inlined std::ptr::without_provenance::) { @@ -83,6 +38,10 @@ fn forward_loop(_1: &[T], _2: impl Fn(&T)) -> () { scope 13 (inlined NonNull::::as_ptr) { } scope 14 (inlined #[track_caller] std::ptr::mut_ptr::::add) { + let mut _9: &usize; + let mut _10: &*mut T; + let mut _11: {closure@std::ptr::mut_ptr::::add::{closure#0}}; + let _12: (); } } scope 8 (inlined NonNull::<[T]>::from_ref) { @@ -101,6 +60,8 @@ fn forward_loop(_1: &[T], _2: impl Fn(&T)) -> () { bb0: { 
StorageLive(_3); + StorageLive(_6); + StorageLive(_14); StorageLive(_4); _3 = PtrMetadata(copy _1); _4 = &raw const (*_1); @@ -108,142 +69,98 @@ fn forward_loop(_1: &[T], _2: impl Fn(&T)) -> () { _5 = copy _4 as *const T (PtrToPtr); _6 = NonNull:: { pointer: copy _5 }; StorageDead(_5); - switchInt(const ::IS_ZST) -> [0: bb1, otherwise: bb2]; + switchInt(const ::IS_ZST) -> [0: bb1, otherwise: bb3]; } bb1: { - StorageLive(_8); + StorageLive(_13); StorageLive(_7); _7 = copy _4 as *mut T (PtrToPtr); - _8 = Offset(copy _7, copy _3); - StorageDead(_7); - _9 = copy _8 as *const T (PtrToPtr); - StorageDead(_8); - goto -> bb3; + StorageLive(_8); + _8 = copy _3; + StorageLive(_11); + StorageLive(_9); + _9 = &_8; + StorageLive(_10); + _10 = &_7; + _11 = {closure@$SRC_DIR/core/src/ptr/mut_ptr.rs:LL:COL} { 0: copy _9, 1: copy _10 }; + StorageDead(_10); + StorageDead(_9); + _12 = contract_check_requires::<{closure@std::ptr::mut_ptr::::add::{closure#0}}>(move _11) -> [return: bb2, unwind: bb12]; } bb2: { - _9 = copy _3 as *const T (Transmute); - goto -> bb3; + StorageDead(_11); + _13 = Offset(copy _7, copy _3); + StorageDead(_8); + StorageDead(_7); + _14 = copy _13 as *const T (PtrToPtr); + StorageDead(_13); + goto -> bb4; } bb3: { - StorageDead(_4); - StorageDead(_3); + _14 = copy _3 as *const T (Transmute); goto -> bb4; } bb4: { - StorageLive(_22); - StorageLive(_10); - StorageLive(_11); - StorageLive(_18); - StorageLive(_19); - StorageLive(_12); - StorageLive(_21); - _10 = copy _6; - _11 = copy _9; - switchInt(const ::IS_ZST) -> [0: bb5, otherwise: bb8]; + _15 = std::slice::Iter::<'_, T> { ptr: copy _6, end_or_len: copy _14, _marker: const ZeroSized: PhantomData<&T> }; + StorageDead(_4); + StorageDead(_14); + StorageDead(_6); + StorageDead(_3); + StorageLive(_16); + _16 = copy _15; + goto -> bb5; } bb5: { - StorageLive(_15); - _12 = copy _11 as std::ptr::NonNull (Transmute); - StorageLive(_13); - _13 = copy _10 as *mut T (Transmute); - StorageLive(_14); - _14 = copy _12 as 
*mut T (Transmute); - _15 = Eq(copy _13, copy _14); - StorageDead(_14); - StorageDead(_13); - switchInt(move _15) -> [0: bb6, otherwise: bb7]; + StorageLive(_18); + _17 = &mut _16; + _18 = as Iterator>::next(move _17) -> [return: bb6, unwind: bb12]; } bb6: { - StorageDead(_15); - StorageLive(_17); - StorageLive(_16); - _16 = copy _10 as *const T (Transmute); - _17 = Offset(copy _16, const 1_usize); - StorageDead(_16); - _6 = NonNull:: { pointer: copy _17 }; - StorageDead(_17); - goto -> bb13; + _19 = discriminant(_18); + switchInt(move _19) -> [0: bb7, 1: bb9, otherwise: bb11]; } bb7: { - StorageDead(_15); - StorageDead(_21); - StorageDead(_12); - StorageDead(_19); StorageDead(_18); - StorageDead(_11); - StorageDead(_10); - goto -> bb10; + StorageDead(_16); + drop(_2) -> [return: bb8, unwind continue]; } bb8: { - _18 = copy _11 as usize (Transmute); - switchInt(copy _18) -> [0: bb9, otherwise: bb12]; + return; } bb9: { - StorageDead(_21); - StorageDead(_12); - StorageDead(_19); - StorageDead(_18); - StorageDead(_11); - StorageDead(_10); - goto -> bb10; + _20 = copy ((_18 as Some).0: &T); + StorageLive(_21); + _21 = &_2; + StorageLive(_22); + _22 = (copy _20,); + _23 = >::call(move _21, move _22) -> [return: bb10, unwind: bb12]; } bb10: { StorageDead(_22); - drop(_2) -> [return: bb11, unwind continue]; - } - - bb11: { - return; - } - - bb12: { - _19 = SubUnchecked(copy _18, const 1_usize); - _9 = copy _19 as *const T (Transmute); - goto -> bb13; - } - - bb13: { - StorageLive(_20); - _20 = copy _10 as *const T (Transmute); - _21 = &(*_20); - StorageDead(_20); - _22 = Option::<&T>::Some(copy _21); StorageDead(_21); - StorageDead(_12); - StorageDead(_19); StorageDead(_18); - StorageDead(_11); - StorageDead(_10); - _23 = copy ((_22 as Some).0: &T); - StorageLive(_24); - _24 = &_2; - StorageLive(_25); - _25 = (copy _23,); - _26 = >::call(move _24, move _25) -> [return: bb14, unwind: bb15]; + goto -> bb5; } - bb14: { - StorageDead(_25); - StorageDead(_24); - 
StorageDead(_22); - goto -> bb4; + bb11: { + unreachable; } - bb15 (cleanup): { - drop(_2) -> [return: bb16, unwind terminate(cleanup)]; + bb12 (cleanup): { + drop(_2) -> [return: bb13, unwind terminate(cleanup)]; } - bb16 (cleanup): { + bb13 (cleanup): { resume; } } diff --git a/tests/mir-opt/pre-codegen/slice_iter.range_loop.PreCodegen.after.panic-unwind.mir b/tests/mir-opt/pre-codegen/slice_iter.range_loop.PreCodegen.after.panic-unwind.mir index 8e573ef488fce..4865ca5e48415 100644 --- a/tests/mir-opt/pre-codegen/slice_iter.range_loop.PreCodegen.after.panic-unwind.mir +++ b/tests/mir-opt/pre-codegen/slice_iter.range_loop.PreCodegen.after.panic-unwind.mir @@ -5,20 +5,22 @@ fn range_loop(_1: &[T], _2: impl Fn(usize, &T)) -> () { debug f => _2; let mut _0: (); let mut _3: usize; - let mut _8: std::option::Option; - let mut _10: bool; - let mut _12: &impl Fn(usize, &T); - let mut _13: (usize, &T); - let _14: (); + let mut _8: usize; + let mut _9: usize; + let mut _20: std::option::Option; + let mut _22: bool; + let mut _24: &impl Fn(usize, &T); + let mut _25: (usize, &T); + let _26: (); scope 1 { debug ((iter: std::ops::Range).0: usize) => _4; debug ((iter: std::ops::Range).1: usize) => _3; - let _9: usize; + let _21: usize; scope 2 { - debug i => _9; - let _11: &T; + debug i => _21; + let _23: &T; scope 3 { - debug x => _11; + debug x => _23; } } scope 5 (inlined iter::range::>::next) { @@ -28,7 +30,17 @@ fn range_loop(_1: &[T], _2: impl Fn(usize, &T)) -> () { let _7: usize; scope 7 { scope 9 (inlined ::forward_unchecked) { + let mut _10: &usize; + let mut _11: &usize; + let mut _12: {closure@::forward_unchecked::{closure#0}}; + let _13: (); + let mut _14: usize; + let mut _15: usize; scope 10 (inlined #[track_caller] core::num::::unchecked_add) { + let mut _16: &usize; + let mut _17: &usize; + let mut _18: {closure@core::num::::unchecked_add::{closure#0}}; + let _19: (); scope 11 (inlined core::ub_checks::check_language_ub) { scope 12 (inlined 
core::ub_checks::check_language_ub::runtime) { } @@ -52,7 +64,7 @@ fn range_loop(_1: &[T], _2: impl Fn(usize, &T)) -> () { } bb1: { - StorageLive(_8); + StorageLive(_20); StorageLive(_6); StorageLive(_5); _5 = copy _4; @@ -63,7 +75,7 @@ fn range_loop(_1: &[T], _2: impl Fn(usize, &T)) -> () { bb2: { StorageDead(_6); - StorageDead(_8); + StorageDead(_20); drop(_2) -> [return: bb3, unwind continue]; } @@ -73,35 +85,73 @@ fn range_loop(_1: &[T], _2: impl Fn(usize, &T)) -> () { bb4: { _7 = copy _4; - _4 = AddUnchecked(copy _7, const 1_usize); - _8 = Option::::Some(copy _7); - StorageDead(_6); - _9 = copy ((_8 as Some).0: usize); - _10 = Lt(copy _9, copy _3); - assert(move _10, "index out of bounds: the length is {} but the index is {}", copy _3, copy _9) -> [success: bb5, unwind: bb7]; + StorageLive(_8); + _8 = copy _7; + StorageLive(_9); + _9 = const 1_usize; + StorageLive(_12); + StorageLive(_10); + _10 = &_8; + StorageLive(_11); + _11 = &_9; + _12 = {closure@$SRC_DIR/core/src/iter/range.rs:LL:COL} { 0: copy _10, 1: copy _11 }; + StorageDead(_11); + StorageDead(_10); + _13 = contract_check_requires::<{closure@::forward_unchecked::{closure#0}}>(move _12) -> [return: bb5, unwind: bb9]; } bb5: { - _11 = &(*_1)[_9]; - StorageLive(_12); - _12 = &_2; - StorageLive(_13); - _13 = (copy _9, copy _11); - _14 = >::call(move _12, move _13) -> [return: bb6, unwind: bb7]; + StorageDead(_12); + StorageLive(_14); + _14 = copy _7; + StorageLive(_15); + _15 = const 1_usize; + StorageLive(_18); + StorageLive(_16); + _16 = &_14; + StorageLive(_17); + _17 = &_15; + _18 = {closure@$SRC_DIR/core/src/num/uint_macros.rs:LL:COL} { 0: copy _16, 1: copy _17 }; + StorageDead(_17); + StorageDead(_16); + _19 = contract_check_requires::<{closure@core::num::::unchecked_add::{closure#0}}>(move _18) -> [return: bb6, unwind: bb9]; } bb6: { - StorageDead(_13); - StorageDead(_12); + StorageDead(_18); + _4 = AddUnchecked(copy _7, const 1_usize); + StorageDead(_15); + StorageDead(_14); + StorageDead(_9); 
StorageDead(_8); + _20 = Option::::Some(copy _7); + StorageDead(_6); + _21 = copy ((_20 as Some).0: usize); + _22 = Lt(copy _21, copy _3); + assert(move _22, "index out of bounds: the length is {} but the index is {}", copy _3, copy _21) -> [success: bb7, unwind: bb9]; + } + + bb7: { + _23 = &(*_1)[_21]; + StorageLive(_24); + _24 = &_2; + StorageLive(_25); + _25 = (copy _21, copy _23); + _26 = >::call(move _24, move _25) -> [return: bb8, unwind: bb9]; + } + + bb8: { + StorageDead(_25); + StorageDead(_24); + StorageDead(_20); goto -> bb1; } - bb7 (cleanup): { - drop(_2) -> [return: bb8, unwind terminate(cleanup)]; + bb9 (cleanup): { + drop(_2) -> [return: bb10, unwind terminate(cleanup)]; } - bb8 (cleanup): { + bb10 (cleanup): { resume; } } diff --git a/tests/mir-opt/pre-codegen/slice_iter.reverse_loop.PreCodegen.after.panic-unwind.mir b/tests/mir-opt/pre-codegen/slice_iter.reverse_loop.PreCodegen.after.panic-unwind.mir index cb0d640d445bb..017ecb77f4d25 100644 --- a/tests/mir-opt/pre-codegen/slice_iter.reverse_loop.PreCodegen.after.panic-unwind.mir +++ b/tests/mir-opt/pre-codegen/slice_iter.reverse_loop.PreCodegen.after.panic-unwind.mir @@ -4,33 +4,60 @@ fn reverse_loop(_1: &[T], _2: impl Fn(&T)) -> () { debug slice => _1; debug f => _2; let mut _0: (); - let mut _10: std::slice::Iter<'_, T>; - let mut _11: std::iter::Rev>; - let mut _12: std::iter::Rev>; - let mut _14: std::option::Option<&T>; - let mut _15: isize; - let mut _17: &impl Fn(&T); - let mut _18: (&T,); - let _19: (); + let mut _15: std::slice::Iter<'_, T>; + let mut _16: std::iter::Rev>; + let mut _17: std::iter::Rev>; + let mut _28: std::option::Option<&T>; + let mut _30: &impl Fn(&T); + let mut _31: (&T,); + let _32: (); scope 1 { - debug iter => _12; - let _16: &T; + debug iter => _17; + let _29: &T; scope 2 { - debug x => _16; + debug x => _29; } scope 18 (inlined > as Iterator>::next) { - let mut _13: &mut std::slice::Iter<'_, T>; + let mut _18: &mut std::slice::Iter<'_, T>; + scope 19 (inlined 
as DoubleEndedIterator>::next_back) { + let mut _19: *const T; + let mut _24: bool; + let mut _25: *const T; + let _27: &T; + scope 20 { + let _20: std::ptr::NonNull; + let _26: usize; + scope 21 { + } + scope 22 { + scope 25 (inlined as PartialEq>::eq) { + let mut _21: std::ptr::NonNull; + let mut _22: *mut T; + let mut _23: *mut T; + scope 26 (inlined NonNull::::as_ptr) { + } + scope 27 (inlined NonNull::::as_ptr) { + } + } + } + scope 23 (inlined std::ptr::const_ptr::::addr) { + scope 24 (inlined std::ptr::const_ptr::::cast::<()>) { + } + } + } + } } } scope 3 (inlined core::slice::::iter) { scope 4 (inlined std::slice::Iter::<'_, T>::new) { let _3: usize; let mut _7: *mut T; - let mut _8: *mut T; + let mut _8: usize; + let mut _13: *mut T; scope 5 { let _6: std::ptr::NonNull; scope 6 { - let _9: *const T; + let _14: *const T; scope 7 { } scope 11 (inlined std::ptr::without_provenance::) { @@ -40,6 +67,10 @@ fn reverse_loop(_1: &[T], _2: impl Fn(&T)) -> () { scope 13 (inlined NonNull::::as_ptr) { } scope 14 (inlined #[track_caller] std::ptr::mut_ptr::::add) { + let mut _9: &usize; + let mut _10: &*mut T; + let mut _11: {closure@std::ptr::mut_ptr::::add::{closure#0}}; + let _12: (); } } scope 8 (inlined NonNull::<[T]>::from_ref) { @@ -61,10 +92,10 @@ fn reverse_loop(_1: &[T], _2: impl Fn(&T)) -> () { } bb0: { - StorageLive(_10); + StorageLive(_15); StorageLive(_3); StorageLive(_6); - StorageLive(_9); + StorageLive(_14); StorageLive(_4); _3 = PtrMetadata(copy _1); _4 = &raw const (*_1); @@ -72,86 +103,143 @@ fn reverse_loop(_1: &[T], _2: impl Fn(&T)) -> () { _5 = copy _4 as *const T (PtrToPtr); _6 = NonNull:: { pointer: copy _5 }; StorageDead(_5); - switchInt(const ::IS_ZST) -> [0: bb1, otherwise: bb2]; + switchInt(const ::IS_ZST) -> [0: bb1, otherwise: bb3]; } bb1: { - StorageLive(_8); + StorageLive(_13); StorageLive(_7); _7 = copy _4 as *mut T (PtrToPtr); - _8 = Offset(copy _7, copy _3); - StorageDead(_7); - _9 = copy _8 as *const T (PtrToPtr); - 
StorageDead(_8); - goto -> bb3; + StorageLive(_8); + _8 = copy _3; + StorageLive(_11); + StorageLive(_9); + _9 = &_8; + StorageLive(_10); + _10 = &_7; + _11 = {closure@$SRC_DIR/core/src/ptr/mut_ptr.rs:LL:COL} { 0: copy _9, 1: copy _10 }; + StorageDead(_10); + StorageDead(_9); + _12 = contract_check_requires::<{closure@std::ptr::mut_ptr::::add::{closure#0}}>(move _11) -> [return: bb2, unwind: bb12]; } bb2: { - _9 = copy _3 as *const T (Transmute); - goto -> bb3; + StorageDead(_11); + _13 = Offset(copy _7, copy _3); + StorageDead(_8); + StorageDead(_7); + _14 = copy _13 as *const T (PtrToPtr); + StorageDead(_13); + goto -> bb4; } bb3: { - _10 = std::slice::Iter::<'_, T> { ptr: copy _6, end_or_len: copy _9, _marker: const ZeroSized: PhantomData<&T> }; - StorageDead(_4); - StorageDead(_9); - StorageDead(_6); - StorageDead(_3); - _11 = Rev::> { iter: copy _10 }; - StorageDead(_10); - StorageLive(_12); - _12 = copy _11; + _14 = copy _3 as *const T (Transmute); goto -> bb4; } bb4: { - StorageLive(_14); - StorageLive(_13); - _13 = &mut (_12.0: std::slice::Iter<'_, T>); - _14 = as DoubleEndedIterator>::next_back(move _13) -> [return: bb5, unwind: bb11]; + _15 = std::slice::Iter::<'_, T> { ptr: copy _6, end_or_len: copy _14, _marker: const ZeroSized: PhantomData<&T> }; + StorageDead(_4); + StorageDead(_14); + StorageDead(_6); + StorageDead(_3); + _16 = Rev::> { iter: copy _15 }; + StorageDead(_15); + StorageLive(_17); + _17 = copy _16; + goto -> bb5; } bb5: { - StorageDead(_13); - _15 = discriminant(_14); - switchInt(move _15) -> [0: bb6, 1: bb8, otherwise: bb10]; + StorageLive(_28); + StorageLive(_18); + _18 = &mut (_17.0: std::slice::Iter<'_, T>); + StorageLive(_26); + StorageLive(_25); + StorageLive(_20); + StorageLive(_27); + StorageLive(_24); + switchInt(const ::IS_ZST) -> [0: bb6, otherwise: bb7]; } bb6: { - StorageDead(_14); - StorageDead(_12); - drop(_2) -> [return: bb7, unwind continue]; + StorageLive(_19); + _19 = copy ((*_18).1: *const T); + _20 = copy _19 as 
std::ptr::NonNull (Transmute); + StorageDead(_19); + StorageLive(_22); + StorageLive(_21); + _21 = copy ((*_18).0: std::ptr::NonNull); + _22 = copy _21 as *mut T (Transmute); + StorageDead(_21); + StorageLive(_23); + _23 = copy _20 as *mut T (Transmute); + _24 = Eq(copy _22, copy _23); + StorageDead(_23); + StorageDead(_22); + goto -> bb8; } bb7: { - return; + _25 = copy ((*_18).1: *const T); + _26 = copy _25 as usize (Transmute); + _24 = Eq(copy _26, const 0_usize); + goto -> bb8; } bb8: { - _16 = copy ((_14 as Some).0: &T); - StorageLive(_17); - _17 = &_2; - StorageLive(_18); - _18 = (copy _16,); - _19 = >::call(move _17, move _18) -> [return: bb9, unwind: bb11]; + switchInt(move _24) -> [0: bb9, otherwise: bb14]; } bb9: { - StorageDead(_18); - StorageDead(_17); - StorageDead(_14); - goto -> bb4; + _27 = std::slice::Iter::<'_, T>::next_back_unchecked(move _18) -> [return: bb10, unwind: bb12]; } bb10: { - unreachable; + _28 = Option::<&T>::Some(copy _27); + StorageDead(_24); + StorageDead(_27); + StorageDead(_20); + StorageDead(_25); + StorageDead(_26); + StorageDead(_18); + _29 = copy ((_28 as Some).0: &T); + StorageLive(_30); + _30 = &_2; + StorageLive(_31); + _31 = (copy _29,); + _32 = >::call(move _30, move _31) -> [return: bb11, unwind: bb12]; } - bb11 (cleanup): { - drop(_2) -> [return: bb12, unwind terminate(cleanup)]; + bb11: { + StorageDead(_31); + StorageDead(_30); + StorageDead(_28); + goto -> bb5; } bb12 (cleanup): { + drop(_2) -> [return: bb13, unwind terminate(cleanup)]; + } + + bb13 (cleanup): { resume; } + + bb14: { + StorageDead(_24); + StorageDead(_27); + StorageDead(_20); + StorageDead(_25); + StorageDead(_26); + StorageDead(_18); + StorageDead(_28); + StorageDead(_17); + drop(_2) -> [return: bb15, unwind continue]; + } + + bb15: { + return; + } } diff --git a/tests/mir-opt/pre-codegen/slice_iter.slice_iter_mut_next_back.PreCodegen.after.panic-unwind.mir 
b/tests/mir-opt/pre-codegen/slice_iter.slice_iter_mut_next_back.PreCodegen.after.panic-unwind.mir index dfe5e206fadaf..fed3ca516798c 100644 --- a/tests/mir-opt/pre-codegen/slice_iter.slice_iter_mut_next_back.PreCodegen.after.panic-unwind.mir +++ b/tests/mir-opt/pre-codegen/slice_iter.slice_iter_mut_next_back.PreCodegen.after.panic-unwind.mir @@ -3,12 +3,92 @@ fn slice_iter_mut_next_back(_1: &mut std::slice::IterMut<'_, T>) -> Option<&mut T> { debug it => _1; let mut _0: std::option::Option<&mut T>; + scope 1 (inlined as DoubleEndedIterator>::next_back) { + let mut _2: *mut T; + let mut _7: bool; + let mut _8: *mut T; + let mut _10: &mut T; + scope 2 { + let _3: std::ptr::NonNull; + let _9: usize; + scope 3 { + } + scope 4 { + scope 7 (inlined as PartialEq>::eq) { + let mut _4: std::ptr::NonNull; + let mut _5: *mut T; + let mut _6: *mut T; + scope 8 (inlined NonNull::::as_ptr) { + } + scope 9 (inlined NonNull::::as_ptr) { + } + } + } + scope 5 (inlined std::ptr::mut_ptr::::addr) { + scope 6 (inlined std::ptr::mut_ptr::::cast::<()>) { + } + } + } + } bb0: { - _0 = as DoubleEndedIterator>::next_back(move _1) -> [return: bb1, unwind continue]; + StorageLive(_9); + StorageLive(_8); + StorageLive(_3); + StorageLive(_2); + StorageLive(_10); + StorageLive(_7); + switchInt(const ::IS_ZST) -> [0: bb1, otherwise: bb2]; } bb1: { + _2 = copy ((*_1).1: *mut T); + _3 = copy _2 as std::ptr::NonNull (Transmute); + StorageLive(_5); + StorageLive(_4); + _4 = copy ((*_1).0: std::ptr::NonNull); + _5 = copy _4 as *mut T (Transmute); + StorageDead(_4); + StorageLive(_6); + _6 = copy _3 as *mut T (Transmute); + _7 = Eq(copy _5, copy _6); + StorageDead(_6); + StorageDead(_5); + goto -> bb3; + } + + bb2: { + _8 = copy ((*_1).1: *mut T); + _9 = copy _8 as usize (Transmute); + _7 = Eq(copy _9, const 0_usize); + goto -> bb3; + } + + bb3: { + switchInt(move _7) -> [0: bb4, otherwise: bb6]; + } + + bb4: { + _10 = std::slice::IterMut::<'_, T>::next_back_unchecked(move _1) -> [return: bb5, unwind 
continue]; + } + + bb5: { + _0 = Option::<&mut T>::Some(copy _10); + goto -> bb7; + } + + bb6: { + _0 = const {transmute(0x0000000000000000): Option<&mut T>}; + goto -> bb7; + } + + bb7: { + StorageDead(_7); + StorageDead(_10); + StorageDead(_2); + StorageDead(_3); + StorageDead(_8); + StorageDead(_9); return; } } diff --git a/tests/mir-opt/pre-codegen/slice_iter.slice_iter_next.PreCodegen.after.panic-unwind.mir b/tests/mir-opt/pre-codegen/slice_iter.slice_iter_next.PreCodegen.after.panic-unwind.mir index cc0fce26149e3..fdde07173437b 100644 --- a/tests/mir-opt/pre-codegen/slice_iter.slice_iter_next.PreCodegen.after.panic-unwind.mir +++ b/tests/mir-opt/pre-codegen/slice_iter.slice_iter_next.PreCodegen.after.panic-unwind.mir @@ -3,134 +3,12 @@ fn slice_iter_next(_1: &mut std::slice::Iter<'_, T>) -> Option<&T> { debug it => _1; let mut _0: std::option::Option<&T>; - scope 1 (inlined as Iterator>::next) { - let _2: std::ptr::NonNull; - let _4: std::ptr::NonNull; - let mut _7: bool; - let mut _10: std::ptr::NonNull; - let mut _12: usize; - let _14: &T; - scope 2 { - let _3: *const T; - scope 3 { - let _11: usize; - scope 4 { - scope 7 (inlined #[track_caller] core::num::::unchecked_sub) { - scope 8 (inlined core::ub_checks::check_language_ub) { - scope 9 (inlined core::ub_checks::check_language_ub::runtime) { - } - } - } - scope 10 (inlined without_provenance_mut::) { - } - } - scope 5 (inlined std::ptr::const_ptr::::addr) { - scope 6 (inlined std::ptr::const_ptr::::cast::<()>) { - } - } - scope 11 (inlined as PartialEq>::eq) { - let mut _5: *mut T; - let mut _6: *mut T; - scope 12 (inlined NonNull::::as_ptr) { - } - scope 13 (inlined NonNull::::as_ptr) { - } - } - scope 14 (inlined NonNull::::add) { - let mut _8: *const T; - let mut _9: *const T; - scope 15 (inlined NonNull::::as_ptr) { - } - } - scope 16 (inlined NonNull::::as_ref::<'_>) { - let _13: *const T; - scope 17 (inlined NonNull::::as_ptr) { - } - scope 18 (inlined std::ptr::mut_ptr::::cast_const) { - } - } - 
} - } - } bb0: { - StorageLive(_2); - StorageLive(_3); - StorageLive(_11); - StorageLive(_12); - StorageLive(_4); - StorageLive(_14); - _2 = copy ((*_1).0: std::ptr::NonNull); - _3 = copy ((*_1).1: *const T); - switchInt(const ::IS_ZST) -> [0: bb1, otherwise: bb4]; + _0 = as Iterator>::next(move _1) -> [return: bb1, unwind continue]; } bb1: { - StorageLive(_7); - _4 = copy _3 as std::ptr::NonNull (Transmute); - StorageLive(_5); - _5 = copy _2 as *mut T (Transmute); - StorageLive(_6); - _6 = copy _4 as *mut T (Transmute); - _7 = Eq(copy _5, copy _6); - StorageDead(_6); - StorageDead(_5); - switchInt(move _7) -> [0: bb2, otherwise: bb3]; - } - - bb2: { - StorageDead(_7); - StorageLive(_10); - StorageLive(_9); - StorageLive(_8); - _8 = copy _2 as *const T (Transmute); - _9 = Offset(copy _8, const 1_usize); - StorageDead(_8); - _10 = NonNull:: { pointer: copy _9 }; - StorageDead(_9); - ((*_1).0: std::ptr::NonNull) = move _10; - StorageDead(_10); - goto -> bb7; - } - - bb3: { - _0 = const {transmute(0x0000000000000000): Option<&T>}; - StorageDead(_7); - goto -> bb8; - } - - bb4: { - _11 = copy _3 as usize (Transmute); - switchInt(copy _11) -> [0: bb5, otherwise: bb6]; - } - - bb5: { - _0 = const {transmute(0x0000000000000000): Option<&T>}; - goto -> bb8; - } - - bb6: { - _12 = SubUnchecked(copy _11, const 1_usize); - ((*_1).1: *const T) = copy _12 as *const T (Transmute); - goto -> bb7; - } - - bb7: { - StorageLive(_13); - _13 = copy _2 as *const T (Transmute); - _14 = &(*_13); - StorageDead(_13); - _0 = Option::<&T>::Some(copy _14); - goto -> bb8; - } - - bb8: { - StorageDead(_14); - StorageDead(_4); - StorageDead(_12); - StorageDead(_11); - StorageDead(_3); - StorageDead(_2); return; } }