From e10a8f6954be2f82ec8da3489fc19c909fb0ae1b Mon Sep 17 00:00:00 2001
From: thanhnguyen-aws
Date: Mon, 28 Jul 2025 10:37:36 -0700
Subject: [PATCH 01/20] add loop-invariants and harnesses

---
 library/core/src/slice/memchr.rs | 30 ++++++++++++++++++++++++++++++
 1 file changed, 30 insertions(+)

diff --git a/library/core/src/slice/memchr.rs b/library/core/src/slice/memchr.rs
index 1e1053583a617..5c7b6c03e127c 100644
--- a/library/core/src/slice/memchr.rs
+++ b/library/core/src/slice/memchr.rs
@@ -36,6 +36,7 @@ const fn memchr_naive(x: u8, text: &[u8]) -> Option<usize> {
     let mut i = 0;
 
     // FIXME(const-hack): Replace with `text.iter().pos(|c| *c == x)`.
+    #[safety::loop_invariant(true)]
     while i < text.len() {
         if text[i] == x {
             return Some(i);
@@ -78,6 +79,7 @@ const fn memchr_aligned(x: u8, text: &[u8]) -> Option<usize> {
 
     // search the body of the text
     let repeated_x = usize::repeat_u8(x);
+    #[safety::loop_invariant(true)]
     while offset <= len - 2 * USIZE_BYTES {
         // SAFETY: the while's predicate guarantees a distance of at least 2 * usize_bytes
         // between the offset and the end of the slice.
@@ -139,6 +141,7 @@ pub fn memrchr(x: u8, text: &[u8]) -> Option<usize> {
     let repeated_x = usize::repeat_u8(x);
     let chunk_bytes = size_of::<Chunk>();
 
+    #[safety::loop_invariant(true)]
     while offset > min_aligned_offset {
         // SAFETY: offset starts at len - suffix.len(), as long as it is greater than
         // min_aligned_offset (prefix.len()) the remaining distance is at least 2 * chunk_bytes.
@@ -159,3 +162,30 @@ pub fn memrchr(x: u8, text: &[u8]) -> Option<usize> {
     // Find the byte before the point the body loop stopped.
     text[..offset].iter().rposition(|elt| *elt == x)
 }
+
+#[cfg(kani)]
+#[unstable(feature = "kani", issue = "none")]
+pub mod verify {
+    use super::*;
+
+    #[kani::proof]
+    #[cfg(not(all(target_arch = "x86_64", target_feature = "sse2")))]
+    pub fn check_memchr_naive() {
+        const ARR_SIZE: usize = 1000;
+        let x: u8 = kani::any();
+        let a: [u8; ARR_SIZE] = kani::any();
+        let text = kani::slice::any_slice_of_array(&a);
+        let _result = memchr_naive(x, text);
+    }
+
+    #[kani::proof]
+    #[cfg(not(all(target_arch = "x86_64", target_feature = "sse2")))]
+    pub fn check_memchr_naive() {
+        const ARR_SIZE: usize = 1000;
+        let x: u8 = kani::any();
+        let a: [u8; ARR_SIZE] = kani::any();
+        let text = kani::slice::any_slice_of_array(&a);
+        let _result = memrchr(x, text);
+    }
+
+}

From 8f02ac9bcca7b74bc190c5c823b07a9c53c063f5 Mon Sep 17 00:00:00 2001
From: thanhnguyen-aws
Date: Mon, 28 Jul 2025 10:57:23 -0700
Subject: [PATCH 02/20] format fix

---
 library/core/src/slice/memchr.rs | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/library/core/src/slice/memchr.rs b/library/core/src/slice/memchr.rs
index 5c7b6c03e127c..d2dcea9acf177 100644
--- a/library/core/src/slice/memchr.rs
+++ b/library/core/src/slice/memchr.rs
@@ -172,7 +172,7 @@ pub mod verify {
     #[cfg(not(all(target_arch = "x86_64", target_feature = "sse2")))]
     pub fn check_memchr_naive() {
         const ARR_SIZE: usize = 1000;
-        let x: u8 = kani::any(); 
+        let x: u8 = kani::any();
         let a: [u8; ARR_SIZE] = kani::any();
         let text = kani::slice::any_slice_of_array(&a);
         let _result = memchr_naive(x, text);
@@ -182,10 +182,9 @@ pub mod verify {
     #[cfg(not(all(target_arch = "x86_64", target_feature = "sse2")))]
     pub fn check_memchr_naive() {
         const ARR_SIZE: usize = 1000;
-        let x: u8 = kani::any(); 
+        let x: u8 = kani::any();
         let a: [u8; ARR_SIZE] = kani::any();
         let text = kani::slice::any_slice_of_array(&a);
         let _result = memrchr(x, text);
     }
-
 }

From 617804e4e23be2943ef6044e4aa29f04c2abfddb Mon Sep 17 00:00:00 2001
From: thanhnguyen-aws
Date: Mon, 28 Jul 2025 15:24:27 -0700
Subject: [PATCH 03/20] fix harness name

---
 library/core/src/slice/memchr.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/library/core/src/slice/memchr.rs b/library/core/src/slice/memchr.rs
index d2dcea9acf177..c9e1d70c0065e 100644
--- a/library/core/src/slice/memchr.rs
+++ b/library/core/src/slice/memchr.rs
@@ -180,7 +180,7 @@ pub mod verify {
 
     #[kani::proof]
     #[cfg(not(all(target_arch = "x86_64", target_feature = "sse2")))]
-    pub fn check_memchr_naive() {
+    pub fn check_memchr() {
         const ARR_SIZE: usize = 1000;
         let x: u8 = kani::any();
         let a: [u8; ARR_SIZE] = kani::any();

From dcf1882b4361437a0e0c17fa70ebd9c3435afa80 Mon Sep 17 00:00:00 2001
From: thanhnguyen-aws
Date: Tue, 29 Jul 2025 12:47:17 -0700
Subject: [PATCH 04/20] add crate::kani

---
 library/core/src/slice/memchr.rs | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/library/core/src/slice/memchr.rs b/library/core/src/slice/memchr.rs
index c9e1d70c0065e..d4c900997b0b7 100644
--- a/library/core/src/slice/memchr.rs
+++ b/library/core/src/slice/memchr.rs
@@ -167,7 +167,8 @@ pub fn memrchr(x: u8, text: &[u8]) -> Option<usize> {
 #[unstable(feature = "kani", issue = "none")]
 pub mod verify {
     use super::*;
-
+    use crate::kani;
+    
     #[kani::proof]
     #[cfg(not(all(target_arch = "x86_64", target_feature = "sse2")))]
     pub fn check_memchr_naive() {

From 5ccddc7fac5eecd1067f45e0cda4eeebe495d8b1 Mon Sep 17 00:00:00 2001
From: thanhnguyen-aws
Date: Tue, 29 Jul 2025 13:27:16 -0700
Subject: [PATCH 05/20] fix format

---
 library/core/src/slice/memchr.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/library/core/src/slice/memchr.rs b/library/core/src/slice/memchr.rs
index d4c900997b0b7..d993ca5e5664e 100644
--- a/library/core/src/slice/memchr.rs
+++ b/library/core/src/slice/memchr.rs
@@ -168,7 +168,7 @@ pub fn memrchr(x: u8, text: &[u8]) -> Option<usize> {
 pub mod verify {
     use super::*;
     use crate::kani;
-    
+
     #[kani::proof]
     #[cfg(not(all(target_arch = "x86_64", target_feature = "sse2")))]
     pub fn check_memchr_naive() {

From 484437753c08112b8d7c7dc474abf8295677d6d6 Mon Sep 17 00:00:00 2001
From: thanhnguyen-aws
Date: Tue, 29 Jul 2025 13:35:08 -0700
Subject: [PATCH 06/20] fix format

---
 library/core/src/slice/memchr.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/library/core/src/slice/memchr.rs b/library/core/src/slice/memchr.rs
index d993ca5e5664e..606ae0dd42e46 100644
--- a/library/core/src/slice/memchr.rs
+++ b/library/core/src/slice/memchr.rs
@@ -79,7 +79,7 @@ const fn memchr_aligned(x: u8, text: &[u8]) -> Option<usize> {
 
     // search the body of the text
     let repeated_x = usize::repeat_u8(x);
-    #[safety::loop_invariant(true)]
+    #[safety::loop_invariant(len >= 2 * USIZE_BYTES)]
     while offset <= len - 2 * USIZE_BYTES {
         // SAFETY: the while's predicate guarantees a distance of at least 2 * usize_bytes
         // between the offset and the end of the slice.
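Patches 06 through 08 iterate on the invariant for the body loop of `memchr_aligned`. The sketch below is a minimal, self-contained illustration (not part of the patch series) of why the invariant ends up with two conjuncts: `len >= 2 * USIZE_BYTES` is what keeps the guard's subtraction `len - 2 * USIZE_BYTES` from underflowing once the loop is abstracted by its contract, and `offset <= len` is what keeps the scan position inside the slice. It assumes Kani's unstable loop-contracts support (`#[kani::loop_invariant]`, enabled with `-Z loop-contracts`), which is the mechanism the `#[safety::loop_invariant]` wrapper used in these patches is presumed to forward to; the module name, harness name, and the `len <= 64` bound are made up for the example.

```rust
// Illustrative sketch only -- not part of the patches. `invariant_sketch`,
// `body_loop_invariant_sketch`, and the `len <= 64` bound are hypothetical.
#[cfg(kani)]
mod invariant_sketch {
    const USIZE_BYTES: usize = core::mem::size_of::<usize>();

    #[kani::proof]
    fn body_loop_invariant_sketch() {
        let len: usize = kani::any();
        let mut offset: usize = kani::any();
        // Mirror the state on entry to the body loop of `memchr_aligned`
        // (the small bound just keeps the proof cheap).
        kani::assume(len >= 2 * USIZE_BYTES && len <= 64);
        kani::assume(offset <= len);

        // The invariant PATCH 08 settles on: the first conjunct rules out
        // underflow in the guard, the second keeps `offset` inside the slice.
        #[kani::loop_invariant(len >= 2 * USIZE_BYTES && offset <= len)]
        while offset <= len - 2 * USIZE_BYTES {
            // Stand-in for the two-word scan: advance by one chunk pair.
            offset += 2 * USIZE_BYTES;
        }

        // Invariant plus negated guard give the bound we need afterwards.
        assert!(offset <= len);
    }
}
```

Dropping `offset <= len` from the invariant would leave the final assertion unprovable: after the loop the verifier only knows the invariant and the negated guard, and `offset > len - 2 * USIZE_BYTES` alone gives no upper bound on `offset`.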
From ce548520d871dc7a51524a959d46e308852c8ff0 Mon Sep 17 00:00:00 2001
From: thanhnguyen-aws
Date: Tue, 29 Jul 2025 13:42:27 -0700
Subject: [PATCH 07/20] fix invariant

---
 library/core/src/slice/memchr.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/library/core/src/slice/memchr.rs b/library/core/src/slice/memchr.rs
index 606ae0dd42e46..57e8714fcd64b 100644
--- a/library/core/src/slice/memchr.rs
+++ b/library/core/src/slice/memchr.rs
@@ -79,7 +79,7 @@ const fn memchr_aligned(x: u8, text: &[u8]) -> Option<usize> {
 
     // search the body of the text
     let repeated_x = usize::repeat_u8(x);
-    #[safety::loop_invariant(len >= 2 * USIZE_BYTES)]
+    #[safety::loop_invariant(len >= 2 * USIZE_BYTES && )]
     while offset <= len - 2 * USIZE_BYTES {
         // SAFETY: the while's predicate guarantees a distance of at least 2 * usize_bytes
         // between the offset and the end of the slice.

From 9bd7cc239f881468cb80691cc9a7a3323bee4511 Mon Sep 17 00:00:00 2001
From: thanhnguyen-aws
Date: Tue, 29 Jul 2025 13:57:53 -0700
Subject: [PATCH 08/20] fix invariant

---
 library/core/src/slice/memchr.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/library/core/src/slice/memchr.rs b/library/core/src/slice/memchr.rs
index 57e8714fcd64b..f181d2b314f59 100644
--- a/library/core/src/slice/memchr.rs
+++ b/library/core/src/slice/memchr.rs
@@ -79,7 +79,7 @@ const fn memchr_aligned(x: u8, text: &[u8]) -> Option<usize> {
 
     // search the body of the text
     let repeated_x = usize::repeat_u8(x);
-    #[safety::loop_invariant(len >= 2 * USIZE_BYTES && )]
+    #[safety::loop_invariant(len >= 2 * USIZE_BYTES && offset <= len)]
     while offset <= len - 2 * USIZE_BYTES {
         // SAFETY: the while's predicate guarantees a distance of at least 2 * usize_bytes
         // between the offset and the end of the slice.

From 71c5bf32664c574a8163591bdc2d6a6539aac763 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com>
Date: Tue, 29 Jul 2025 22:43:03 +0000
Subject: [PATCH 09/20] Merge subtree update for toolchain nightly-2025-07-21
 (#428)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This is an automated PR to merge library subtree updates from
2025-07-14 (rust-lang/rust@e9182f195b8505c87c4bd055b9f6e114ccda0981) to
2025-07-21 (rust-lang/rust@9982d6462bedf1e793f7b2dbd655a4e57cdf67d4)
(inclusive) into main.

`git merge` resulted in conflicts, which require manual resolution.
Files were commited with merge conflict markers.
**Do not remove or edit the following annotations:** git-subtree-dir: library git-subtree-split: 62f86139860b485f63a6f89c49bee34d6079881b --------- Signed-off-by: Ayush Singh Signed-off-by: xizheyin Signed-off-by: Huang Qi Co-authored-by: Marijn Schouten Co-authored-by: Oli Scherer Co-authored-by: Ralf Jung Co-authored-by: Matthias Krüger <476013+matthiaskrgr@users.noreply.github.com> Co-authored-by: наб Co-authored-by: bors Co-authored-by: klensy Co-authored-by: Folkert de Vries Co-authored-by: clubby789 Co-authored-by: Tsukasa OI Co-authored-by: Jana Dönszelmann Co-authored-by: Deadbeef Co-authored-by: Guillaume Gomez Co-authored-by: Daniel Paoliello Co-authored-by: Urgau Co-authored-by: Michael Goulet Co-authored-by: Jacob Pratt Co-authored-by: The Miri Cronjob Bot Co-authored-by: Ayush Singh Co-authored-by: Trevor Gross Co-authored-by: Kurt Heiritz (pseudo) Co-authored-by: Jubilee Co-authored-by: Ben Kimock Co-authored-by: Joshua Gentry Co-authored-by: dvdsk Co-authored-by: Yotam Ofek Co-authored-by: Simonas Kazlauskas Co-authored-by: xizheyin Co-authored-by: 许杰友 Jieyou Xu (Joe) <39484203+jieyouxu@users.noreply.github.com> Co-authored-by: Chris Denton Co-authored-by: Md. Yeasin Arafat Co-authored-by: mejrs <59372212+mejrs@users.noreply.github.com> Co-authored-by: Jakub Beránek Co-authored-by: Amanieu d'Antras Co-authored-by: ltdk Co-authored-by: okaneco <47607823+okaneco@users.noreply.github.com> Co-authored-by: kilavvy <140459108+kilavvy@users.noreply.github.com> Co-authored-by: Pavel Grigorenko Co-authored-by: Esteban Küber Co-authored-by: sayantn Co-authored-by: George Tokmaji Co-authored-by: Moulins Co-authored-by: Josh Stone Co-authored-by: Trevor Gross Co-authored-by: B I Mohammed Abbas Co-authored-by: Benoît du Garreau Co-authored-by: Josh Triplett Co-authored-by: yukang Co-authored-by: Roger Curley Co-authored-by: LevitatingBusinessMan (Rein Fernhout) Co-authored-by: Harvey Hunt Co-authored-by: Colten <70793703+ColtenOuO@users.noreply.github.com> Co-authored-by: Huang Qi Co-authored-by: nazo6 Co-authored-by: León Orell Valerian Liehr Co-authored-by: Orson Peters Co-authored-by: René Kijewski Co-authored-by: Nik Revenco Co-authored-by: Tim (Theemathas) Chirananthavat Co-authored-by: Samuel Tardieu Co-authored-by: Travis Cross Co-authored-by: David Mládek Co-authored-by: Cameron Steffen Co-authored-by: Martin Ombura Jr <8682597+martinomburajr@users.noreply.github.com> Co-authored-by: Luigi Sartor Piucco Co-authored-by: Rémy Rakic Co-authored-by: Nurzhan Sakén Co-authored-by: gitbot Co-authored-by: Michael Tautschnig --- .github/workflows/flux.yml | 2 +- library/Cargo.lock | 4 +- library/alloc/src/boxed/thin.rs | 7 +- library/alloc/src/fmt.rs | 4 +- library/alloc/src/lib.rs | 1 + library/alloc/src/string.rs | 2 +- library/alloc/src/vec/mod.rs | 22 +- library/core/src/any.rs | 2 +- library/core/src/array/mod.rs | 10 +- library/core/src/async_iter/async_iter.rs | 10 +- library/core/src/cell.rs | 8 +- library/core/src/cmp.rs | 8 +- library/core/src/cmp/bytewise.rs | 8 +- library/core/src/default.rs | 4 +- library/core/src/ffi/c_str.rs | 26 +- library/core/src/fmt/mod.rs | 1 - library/core/src/fmt/num.rs | 8 +- library/core/src/intrinsics/mod.rs | 32 +- library/core/src/io/borrowed_buf.rs | 86 +++-- library/core/src/iter/adapters/zip.rs | 74 ++--- library/core/src/iter/sources/empty.rs | 2 +- library/core/src/iter/traits/collect.rs | 1 - library/core/src/iter/traits/iterator.rs | 8 +- library/core/src/lib.rs | 9 +- library/core/src/macros/mod.rs | 18 +- library/core/src/marker.rs | 2 
+- library/core/src/mem/maybe_uninit.rs | 114 +------ library/core/src/mem/mod.rs | 2 +- library/core/src/net/ip_addr.rs | 1 - library/core/src/num/f128.rs | 6 - library/core/src/num/f16.rs | 6 - library/core/src/num/f32.rs | 6 - library/core/src/num/f64.rs | 6 - library/core/src/num/int_log10.rs | 4 +- library/core/src/num/nonzero.rs | 5 +- library/core/src/num/uint_macros.rs | 45 ++- library/core/src/num/wrapping.rs | 48 +-- library/core/src/ops/function.rs | 16 +- library/core/src/ops/index.rs | 6 +- library/core/src/option.rs | 142 ++++++--- library/core/src/prelude/v1.rs | 2 +- library/core/src/primitive_docs.rs | 3 - library/core/src/ptr/alignment.rs | 2 +- library/core/src/ptr/const_ptr.rs | 5 +- library/core/src/ptr/docs/add.md | 2 +- library/core/src/ptr/docs/as_uninit_slice.md | 20 +- library/core/src/ptr/docs/offset.md | 2 +- library/core/src/ptr/mod.rs | 168 +++++----- library/core/src/ptr/mut_ptr.rs | 5 +- library/core/src/ptr/non_null.rs | 5 +- library/core/src/random.rs | 52 ++- library/core/src/range.rs | 16 +- library/core/src/slice/cmp.rs | 22 +- library/core/src/slice/index.rs | 45 ++- library/core/src/slice/mod.rs | 27 +- library/core/src/str/mod.rs | 12 +- library/core/src/str/traits.rs | 40 ++- library/core/src/tuple.rs | 11 - library/core/src/ub_checks.rs | 18 +- library/coretests/tests/floats/f128.rs | 153 --------- library/coretests/tests/floats/f16.rs | 143 --------- library/coretests/tests/floats/f32.rs | 143 --------- library/coretests/tests/floats/f64.rs | 142 --------- library/coretests/tests/floats/mod.rs | 301 ++++++++++++++++-- library/coretests/tests/io/borrowed_buf.rs | 44 ++- .../coretests/tests/iter/adapters/cloned.rs | 3 +- library/coretests/tests/iter/adapters/zip.rs | 119 +++++-- library/coretests/tests/lib.rs | 2 +- library/proc_macro/Cargo.toml | 2 +- library/proc_macro/src/lib.rs | 6 +- library/std/Cargo.toml | 5 +- library/std/src/io/mod.rs | 6 +- library/std/src/lib.rs | 1 - library/std/src/num/f32.rs | 12 +- library/std/src/num/f64.rs | 12 +- library/std/src/prelude/v1.rs | 2 +- library/std/src/random.rs | 24 +- library/std/src/sync/poison.rs | 2 +- library/std/src/sys/backtrace.rs | 100 +++--- .../src/sys/net/connection/socket/windows.rs | 36 ++- .../std/src/sys/pal/sgx/abi/usercalls/mod.rs | 4 +- library/std/src/sys/pal/windows/c.rs | 69 +--- library/std/src/sys/sync/once/futex.rs | 10 +- library/std/src/sys/sync/once/queue.rs | 11 +- library/std/src/thread/local.rs | 23 ++ library/sysroot/Cargo.toml | 1 + library/windows_targets/src/lib.rs | 23 +- rust-toolchain.toml | 2 +- tool_config/kani-version.toml | 2 +- 89 files changed, 1211 insertions(+), 1415 deletions(-) diff --git a/.github/workflows/flux.yml b/.github/workflows/flux.yml index ad1370c04f807..c1bbd32844fb9 100644 --- a/.github/workflows/flux.yml +++ b/.github/workflows/flux.yml @@ -9,7 +9,7 @@ on: env: FIXPOINT_VERSION: "556104ba5508891c357b0bdf819ce706e93d9349" - FLUX_VERSION: "f6bdf90c54ad6eed51b25c125f251cac01cfe170" + FLUX_VERSION: "ebafb8d0ca32d8c0fcd2a0cfef6b1b4bd4dc4a6f" jobs: check-flux-on-core: diff --git a/library/Cargo.lock b/library/Cargo.lock index c681c5935df5f..8b5275e5065e6 100644 --- a/library/Cargo.lock +++ b/library/Cargo.lock @@ -271,9 +271,9 @@ dependencies = [ [[package]] name = "rustc-literal-escaper" -version = "0.0.4" +version = "0.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab03008eb631b703dd16978282ae36c73282e7922fe101a4bd072a40ecea7b8b" +checksum = "e4ee29da77c5a54f42697493cd4c9b9f31b74df666a6c04dfc4fde77abe0438b" 
dependencies = [ "rustc-std-workspace-core", "rustc-std-workspace-std", diff --git a/library/alloc/src/boxed/thin.rs b/library/alloc/src/boxed/thin.rs index 21425b9846e42..1cce36606d2c0 100644 --- a/library/alloc/src/boxed/thin.rs +++ b/library/alloc/src/boxed/thin.rs @@ -5,7 +5,7 @@ use core::error::Error; use core::fmt::{self, Debug, Display, Formatter}; #[cfg(not(no_global_oom_handling))] -use core::intrinsics::const_allocate; +use core::intrinsics::{const_allocate, const_make_global}; use core::marker::PhantomData; #[cfg(not(no_global_oom_handling))] use core::marker::Unsize; @@ -340,9 +340,10 @@ impl WithHeader { alloc.add(metadata_offset).cast(); // SAFETY: `*metadata_ptr` is within the allocation. metadata_ptr.write(ptr::metadata::(ptr::dangling::() as *const Dyn)); - + // SAFETY: valid heap allocation + const_make_global(alloc); // SAFETY: we have just written the metadata. - &*(metadata_ptr) + &*metadata_ptr } }; diff --git a/library/alloc/src/fmt.rs b/library/alloc/src/fmt.rs index 30f42050ac8ac..d0ba9c398864f 100644 --- a/library/alloc/src/fmt.rs +++ b/library/alloc/src/fmt.rs @@ -348,13 +348,13 @@ //! format := '{' [ argument ] [ ':' format_spec ] [ ws ] * '}' //! argument := integer | identifier //! -//! format_spec := [[fill]align][sign]['#']['0'][width]['.' precision]type +//! format_spec := [[fill]align][sign]['#']['0'][width]['.' precision][type] //! fill := character //! align := '<' | '^' | '>' //! sign := '+' | '-' //! width := count //! precision := count | '*' -//! type := '' | '?' | 'x?' | 'X?' | identifier +//! type := '?' | 'x?' | 'X?' | identifier //! count := parameter | integer //! parameter := argument '$' //! ``` diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs index 9bf8fcae64e91..4bb82fb519cd0 100644 --- a/library/alloc/src/lib.rs +++ b/library/alloc/src/lib.rs @@ -156,6 +156,7 @@ #![feature(try_trait_v2)] #![feature(try_with_capacity)] #![feature(tuple_trait)] +#![feature(ub_checks)] #![feature(unicode_internals)] #![feature(unsize)] #![feature(unwrap_infallible)] diff --git a/library/alloc/src/string.rs b/library/alloc/src/string.rs index 58baa07bc1741..a189c00a6b61d 100644 --- a/library/alloc/src/string.rs +++ b/library/alloc/src/string.rs @@ -2611,7 +2611,7 @@ impl_eq! { Cow<'a, str>, &'b str } impl_eq! { Cow<'a, str>, String } #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_const_unstable(feature = "const_default", issue = "67792")] +#[rustc_const_unstable(feature = "const_default", issue = "143894")] impl const Default for String { /// Creates an empty `String`. 
#[inline] diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs index 51071eb974c38..d69b31da8c27f 100644 --- a/library/alloc/src/vec/mod.rs +++ b/library/alloc/src/vec/mod.rs @@ -64,7 +64,7 @@ use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties}; use core::ops::{self, Index, IndexMut, Range, RangeBounds}; use core::ptr::{self, NonNull}; use core::slice::{self, SliceIndex}; -use core::{fmt, intrinsics}; +use core::{fmt, intrinsics, ub_checks}; #[stable(feature = "extract_if", since = "1.87.0")] pub use self::extract_if::ExtractIf; @@ -1058,6 +1058,11 @@ impl Vec { #[inline] #[unstable(feature = "allocator_api", issue = "32838")] pub unsafe fn from_raw_parts_in(ptr: *mut T, length: usize, capacity: usize, alloc: A) -> Self { + ub_checks::assert_unsafe_precondition!( + check_library_ub, + "Vec::from_raw_parts_in requires that length <= capacity", + (length: usize = length, capacity: usize = capacity) => length <= capacity + ); unsafe { Vec { buf: RawVec::from_raw_parts_in(ptr, capacity, alloc), len: length } } } @@ -1174,6 +1179,11 @@ impl Vec { #[unstable(feature = "allocator_api", reason = "new API", issue = "32838")] // #[unstable(feature = "box_vec_non_null", issue = "130364")] pub unsafe fn from_parts_in(ptr: NonNull, length: usize, capacity: usize, alloc: A) -> Self { + ub_checks::assert_unsafe_precondition!( + check_library_ub, + "Vec::from_parts_in requires that length <= capacity", + (length: usize = length, capacity: usize = capacity) => length <= capacity + ); unsafe { Vec { buf: RawVec::from_nonnull_in(ptr, capacity, alloc), len: length } } } @@ -1950,7 +1960,11 @@ impl Vec { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub unsafe fn set_len(&mut self, new_len: usize) { - debug_assert!(new_len <= self.capacity()); + ub_checks::assert_unsafe_precondition!( + check_library_ub, + "Vec::set_len requires that new_len <= capacity()", + (new_len: usize = new_len, capacity: usize = self.capacity()) => new_len <= capacity + ); self.len = new_len; } @@ -3695,7 +3709,7 @@ impl Vec { /// This is optimal if: /// /// * The tail (elements in the vector after `range`) is empty, - /// * or `replace_with` yields fewer or equal elements than `range`’s length + /// * or `replace_with` yields fewer or equal elements than `range`'s length /// * or the lower bound of its `size_hint()` is exact. /// /// Otherwise, a temporary vector is allocated and the tail is moved twice. @@ -3895,7 +3909,7 @@ unsafe impl<#[may_dangle] T, A: Allocator> Drop for Vec { } #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_const_unstable(feature = "const_default", issue = "67792")] +#[rustc_const_unstable(feature = "const_default", issue = "143894")] impl const Default for Vec { /// Creates an empty `Vec`. /// diff --git a/library/core/src/any.rs b/library/core/src/any.rs index 39cdf6efda07a..38393379a78a7 100644 --- a/library/core/src/any.rs +++ b/library/core/src/any.rs @@ -783,7 +783,7 @@ impl TypeId { // This is a provenance-stripping memcpy. 
for (i, chunk) in self.data.iter().copied().enumerate() { - let chunk = chunk.expose_provenance().to_ne_bytes(); + let chunk = chunk.addr().to_ne_bytes(); let start = i * chunk.len(); bytes[start..(start + chunk.len())].copy_from_slice(&chunk); } diff --git a/library/core/src/array/mod.rs b/library/core/src/array/mod.rs index 16356f749c92b..62203e132b70c 100644 --- a/library/core/src/array/mod.rs +++ b/library/core/src/array/mod.rs @@ -374,9 +374,10 @@ impl<'a, T, const N: usize> IntoIterator for &'a mut [T; N] { } #[stable(feature = "index_trait_on_arrays", since = "1.50.0")] -impl Index for [T; N] +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +impl const Index for [T; N] where - [T]: Index, + [T]: ~const Index, { type Output = <[T] as Index>::Output; @@ -387,9 +388,10 @@ where } #[stable(feature = "index_trait_on_arrays", since = "1.50.0")] -impl IndexMut for [T; N] +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +impl const IndexMut for [T; N] where - [T]: IndexMut, + [T]: ~const IndexMut, { #[inline] fn index_mut(&mut self, index: I) -> &mut Self::Output { diff --git a/library/core/src/async_iter/async_iter.rs b/library/core/src/async_iter/async_iter.rs index 069c50c2531b0..c21c08320bef6 100644 --- a/library/core/src/async_iter/async_iter.rs +++ b/library/core/src/async_iter/async_iter.rs @@ -28,15 +28,15 @@ pub trait AsyncIterator { /// async iterator state: /// /// - `Poll::Pending` means that this async iterator's next value is not ready - /// yet. Implementations will ensure that the current task will be notified - /// when the next value may be ready. + /// yet. Implementations will ensure that the current task will be notified + /// when the next value may be ready. /// /// - `Poll::Ready(Some(val))` means that the async iterator has successfully - /// produced a value, `val`, and may produce further values on subsequent - /// `poll_next` calls. + /// produced a value, `val`, and may produce further values on subsequent + /// `poll_next` calls. /// /// - `Poll::Ready(None)` means that the async iterator has terminated, and - /// `poll_next` should not be invoked again. + /// `poll_next` should not be invoked again. /// /// # Panics /// diff --git a/library/core/src/cell.rs b/library/core/src/cell.rs index c53a2b2beb4cc..d6d1bf2effae2 100644 --- a/library/core/src/cell.rs +++ b/library/core/src/cell.rs @@ -333,7 +333,7 @@ impl Clone for Cell { } #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_const_unstable(feature = "const_default", issue = "67792")] +#[rustc_const_unstable(feature = "const_default", issue = "143894")] impl const Default for Cell { /// Creates a `Cell`, with the `Default` value for T. #[inline] @@ -1324,7 +1324,7 @@ impl Clone for RefCell { } #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_const_unstable(feature = "const_default", issue = "67792")] +#[rustc_const_unstable(feature = "const_default", issue = "143894")] impl const Default for RefCell { /// Creates a `RefCell`, with the `Default` value for T. #[inline] @@ -2332,7 +2332,7 @@ impl UnsafeCell { } #[stable(feature = "unsafe_cell_default", since = "1.10.0")] -#[rustc_const_unstable(feature = "const_default", issue = "67792")] +#[rustc_const_unstable(feature = "const_default", issue = "143894")] impl const Default for UnsafeCell { /// Creates an `UnsafeCell`, with the `Default` value for T. 
fn default() -> UnsafeCell { @@ -2437,7 +2437,7 @@ impl SyncUnsafeCell { } #[unstable(feature = "sync_unsafe_cell", issue = "95439")] -#[rustc_const_unstable(feature = "const_default", issue = "67792")] +#[rustc_const_unstable(feature = "const_default", issue = "143894")] impl const Default for SyncUnsafeCell { /// Creates an `SyncUnsafeCell`, with the `Default` value for T. fn default() -> SyncUnsafeCell { diff --git a/library/core/src/cmp.rs b/library/core/src/cmp.rs index 03b120fbf0c34..1b9af10a6fda5 100644 --- a/library/core/src/cmp.rs +++ b/library/core/src/cmp.rs @@ -381,7 +381,8 @@ pub struct AssertParamIsEq { /// /// assert_eq!(2.cmp(&1), Ordering::Greater); /// ``` -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +#[derive(Clone, Copy, Eq, PartialOrd, Ord, Debug, Hash)] +#[derive_const(PartialEq)] #[stable(feature = "rust1", since = "1.0.0")] // This is a lang item only so that `BinOp::Cmp` in MIR can return it. // It has no special behavior, but does require that the three variants @@ -1481,13 +1482,14 @@ pub trait PartialOrd: PartialEq + PointeeSized { } } -fn default_chaining_impl( +fn default_chaining_impl( lhs: &T, rhs: &U, p: impl FnOnce(Ordering) -> bool, ) -> ControlFlow where - T: PartialOrd, + T: PartialOrd + PointeeSized, + U: PointeeSized, { // It's important that this only call `partial_cmp` once, not call `eq` then // one of the relational operators. We don't want to `bcmp`-then-`memcp` a diff --git a/library/core/src/cmp/bytewise.rs b/library/core/src/cmp/bytewise.rs index a06a5227fe285..7d61c9345ecf8 100644 --- a/library/core/src/cmp/bytewise.rs +++ b/library/core/src/cmp/bytewise.rs @@ -17,11 +17,15 @@ use crate::num::NonZero; /// - Neither `Self` nor `Rhs` have provenance, so integer comparisons are correct. /// - `>::{eq,ne}` are equivalent to comparing the bytes. #[rustc_specialization_trait] -pub(crate) unsafe trait BytewiseEq: PartialEq + Sized {} +#[const_trait] +pub(crate) unsafe trait BytewiseEq: + ~const PartialEq + Sized +{ +} macro_rules! is_bytewise_comparable { ($($t:ty),+ $(,)?) => {$( - unsafe impl BytewiseEq for $t {} + unsafe impl const BytewiseEq for $t {} )+}; } diff --git a/library/core/src/default.rs b/library/core/src/default.rs index 4d108b0c18ed3..897267968aacc 100644 --- a/library/core/src/default.rs +++ b/library/core/src/default.rs @@ -104,7 +104,7 @@ use crate::ascii::Char as AsciiChar; #[rustc_diagnostic_item = "Default"] #[stable(feature = "rust1", since = "1.0.0")] #[const_trait] -#[rustc_const_unstable(feature = "const_default", issue = "67792")] +#[rustc_const_unstable(feature = "const_default", issue = "143894")] pub trait Default: Sized { /// Returns the "default value" for a type. /// @@ -151,7 +151,7 @@ pub macro Default($item:item) { macro_rules! 
default_impl { ($t:ty, $v:expr, $doc:tt) => { #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_default", issue = "67792")] + #[rustc_const_unstable(feature = "const_default", issue = "143894")] impl const Default for $t { #[inline(always)] #[doc = $doc] diff --git a/library/core/src/ffi/c_str.rs b/library/core/src/ffi/c_str.rs index 777bee6ea3ba2..756237c6f4182 100644 --- a/library/core/src/ffi/c_str.rs +++ b/library/core/src/ffi/c_str.rs @@ -142,16 +142,20 @@ pub enum FromBytesWithNulError { } #[stable(feature = "frombyteswithnulerror_impls", since = "1.17.0")] -impl Error for FromBytesWithNulError { - #[allow(deprecated)] - fn description(&self) -> &str { +impl fmt::Display for FromBytesWithNulError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - Self::InteriorNul { .. } => "data provided contains an interior nul byte", - Self::NotNulTerminated => "data provided is not nul terminated", + Self::InteriorNul { position } => { + write!(f, "data provided contains an interior nul byte at byte position {position}") + } + Self::NotNulTerminated => write!(f, "data provided is not nul terminated"), } } } +#[stable(feature = "frombyteswithnulerror_impls", since = "1.17.0")] +impl Error for FromBytesWithNulError {} + /// An error indicating that no nul byte was present. /// /// A slice used to create a [`CStr`] must contain a nul byte somewhere @@ -188,18 +192,6 @@ impl Default for &CStr { } } -#[stable(feature = "frombyteswithnulerror_impls", since = "1.17.0")] -impl fmt::Display for FromBytesWithNulError { - #[allow(deprecated, deprecated_in_future)] - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(self.description())?; - if let Self::InteriorNul { position } = self { - write!(f, " at byte pos {position}")?; - } - Ok(()) - } -} - #[unstable(feature = "ub_checks", issue = "none")] impl Invariant for &CStr { /** diff --git a/library/core/src/fmt/mod.rs b/library/core/src/fmt/mod.rs index 228a331e1da4d..8ac29e5b07638 100644 --- a/library/core/src/fmt/mod.rs +++ b/library/core/src/fmt/mod.rs @@ -854,7 +854,6 @@ impl Display for Arguments<'_> { /// }"; /// assert_eq!(format!("The origin is: {origin:#?}"), expected); /// ``` - #[stable(feature = "rust1", since = "1.0.0")] #[rustc_on_unimplemented( on( diff --git a/library/core/src/fmt/num.rs b/library/core/src/fmt/num.rs index e1381ace44151..7d41ae45093ea 100644 --- a/library/core/src/fmt/num.rs +++ b/library/core/src/fmt/num.rs @@ -358,7 +358,7 @@ macro_rules! impl_Display { } #[cfg(feature = "optimize_for_size")] { - offset = _inner_slow_integer_to_str(self.unsigned_abs().$conv_fn(), &mut buf.buf); + offset = ${concat(_inner_slow_integer_to_str, $gen_name)}(self.unsigned_abs().$conv_fn(), &mut buf.buf); } // Only difference between signed and unsigned are these 4 lines. if self < 0 { @@ -401,7 +401,7 @@ macro_rules! impl_Display { } #[cfg(feature = "optimize_for_size")] { - offset = _inner_slow_integer_to_str(self.$conv_fn(), &mut buf.buf); + offset = ${concat(_inner_slow_integer_to_str, $gen_name)}(self.$conv_fn(), &mut buf.buf); } // SAFETY: Starting from `offset`, all elements of the slice have been set. unsafe { slice_buffer_to_str(&buf.buf, offset) } @@ -412,7 +412,7 @@ macro_rules! 
impl_Display { )* #[cfg(feature = "optimize_for_size")] - fn _inner_slow_integer_to_str(mut n: $u, buf: &mut [MaybeUninit::]) -> usize { + fn ${concat(_inner_slow_integer_to_str, $gen_name)}(mut n: $u, buf: &mut [MaybeUninit::]) -> usize { let mut curr = buf.len(); // SAFETY: To show that it's OK to copy into `buf_ptr`, notice that at the beginning @@ -437,7 +437,7 @@ macro_rules! impl_Display { const MAX_DEC_N: usize = $u::MAX.ilog(10) as usize + 1; let mut buf = [MaybeUninit::::uninit(); MAX_DEC_N]; - let offset = _inner_slow_integer_to_str(n, &mut buf); + let offset = ${concat(_inner_slow_integer_to_str, $gen_name)}(n, &mut buf); // SAFETY: Starting from `offset`, all elements of the slice have been set. let buf_slice = unsafe { slice_buffer_to_str(&buf, offset) }; f.pad_integral(is_nonnegative, "", buf_slice) diff --git a/library/core/src/intrinsics/mod.rs b/library/core/src/intrinsics/mod.rs index 15295a3cdf738..e099c28646015 100644 --- a/library/core/src/intrinsics/mod.rs +++ b/library/core/src/intrinsics/mod.rs @@ -916,7 +916,7 @@ pub const unsafe fn arith_offset(dst: *const T, offset: isize) -> *const T; /// # Safety /// /// - `index < PtrMetadata(slice_ptr)`, so the indexing is in-bounds for the slice -/// - the resulting offsetting is in-bounds of the allocated object, which is +/// - the resulting offsetting is in-bounds of the allocation, which is /// always the case for references, but needs to be upheld manually for pointers #[rustc_nounwind] #[rustc_intrinsic] @@ -1385,6 +1385,7 @@ pub unsafe fn fmuladdf128(a: f128, b: f128, c: f128) -> f128; /// /// The stabilized version of this intrinsic is /// [`f16::floor`](../../std/primitive.f16.html#method.floor) +#[rustc_intrinsic_const_stable_indirect] #[rustc_intrinsic] #[rustc_nounwind] pub const unsafe fn floorf16(x: f16) -> f16; @@ -1392,6 +1393,7 @@ pub const unsafe fn floorf16(x: f16) -> f16; /// /// The stabilized version of this intrinsic is /// [`f32::floor`](../../std/primitive.f32.html#method.floor) +#[rustc_intrinsic_const_stable_indirect] #[rustc_intrinsic] #[rustc_nounwind] pub const unsafe fn floorf32(x: f32) -> f32; @@ -1399,6 +1401,7 @@ pub const unsafe fn floorf32(x: f32) -> f32; /// /// The stabilized version of this intrinsic is /// [`f64::floor`](../../std/primitive.f64.html#method.floor) +#[rustc_intrinsic_const_stable_indirect] #[rustc_intrinsic] #[rustc_nounwind] pub const unsafe fn floorf64(x: f64) -> f64; @@ -1406,6 +1409,7 @@ pub const unsafe fn floorf64(x: f64) -> f64; /// /// The stabilized version of this intrinsic is /// [`f128::floor`](../../std/primitive.f128.html#method.floor) +#[rustc_intrinsic_const_stable_indirect] #[rustc_intrinsic] #[rustc_nounwind] pub const unsafe fn floorf128(x: f128) -> f128; @@ -1414,6 +1418,7 @@ pub const unsafe fn floorf128(x: f128) -> f128; /// /// The stabilized version of this intrinsic is /// [`f16::ceil`](../../std/primitive.f16.html#method.ceil) +#[rustc_intrinsic_const_stable_indirect] #[rustc_intrinsic] #[rustc_nounwind] pub const unsafe fn ceilf16(x: f16) -> f16; @@ -1421,6 +1426,7 @@ pub const unsafe fn ceilf16(x: f16) -> f16; /// /// The stabilized version of this intrinsic is /// [`f32::ceil`](../../std/primitive.f32.html#method.ceil) +#[rustc_intrinsic_const_stable_indirect] #[rustc_intrinsic] #[rustc_nounwind] pub const unsafe fn ceilf32(x: f32) -> f32; @@ -1428,6 +1434,7 @@ pub const unsafe fn ceilf32(x: f32) -> f32; /// /// The stabilized version of this intrinsic is /// [`f64::ceil`](../../std/primitive.f64.html#method.ceil) 
+#[rustc_intrinsic_const_stable_indirect] #[rustc_intrinsic] #[rustc_nounwind] pub const unsafe fn ceilf64(x: f64) -> f64; @@ -1435,6 +1442,7 @@ pub const unsafe fn ceilf64(x: f64) -> f64; /// /// The stabilized version of this intrinsic is /// [`f128::ceil`](../../std/primitive.f128.html#method.ceil) +#[rustc_intrinsic_const_stable_indirect] #[rustc_intrinsic] #[rustc_nounwind] pub const unsafe fn ceilf128(x: f128) -> f128; @@ -1443,6 +1451,7 @@ pub const unsafe fn ceilf128(x: f128) -> f128; /// /// The stabilized version of this intrinsic is /// [`f16::trunc`](../../std/primitive.f16.html#method.trunc) +#[rustc_intrinsic_const_stable_indirect] #[rustc_intrinsic] #[rustc_nounwind] pub const unsafe fn truncf16(x: f16) -> f16; @@ -1450,6 +1459,7 @@ pub const unsafe fn truncf16(x: f16) -> f16; /// /// The stabilized version of this intrinsic is /// [`f32::trunc`](../../std/primitive.f32.html#method.trunc) +#[rustc_intrinsic_const_stable_indirect] #[rustc_intrinsic] #[rustc_nounwind] pub const unsafe fn truncf32(x: f32) -> f32; @@ -1457,6 +1467,7 @@ pub const unsafe fn truncf32(x: f32) -> f32; /// /// The stabilized version of this intrinsic is /// [`f64::trunc`](../../std/primitive.f64.html#method.trunc) +#[rustc_intrinsic_const_stable_indirect] #[rustc_intrinsic] #[rustc_nounwind] pub const unsafe fn truncf64(x: f64) -> f64; @@ -1464,6 +1475,7 @@ pub const unsafe fn truncf64(x: f64) -> f64; /// /// The stabilized version of this intrinsic is /// [`f128::trunc`](../../std/primitive.f128.html#method.trunc) +#[rustc_intrinsic_const_stable_indirect] #[rustc_intrinsic] #[rustc_nounwind] pub const unsafe fn truncf128(x: f128) -> f128; @@ -1473,6 +1485,7 @@ pub const unsafe fn truncf128(x: f128) -> f128; /// /// The stabilized version of this intrinsic is /// [`f16::round_ties_even`](../../std/primitive.f16.html#method.round_ties_even) +#[rustc_intrinsic_const_stable_indirect] #[rustc_intrinsic] #[rustc_nounwind] pub const fn round_ties_even_f16(x: f16) -> f16; @@ -1482,6 +1495,7 @@ pub const fn round_ties_even_f16(x: f16) -> f16; /// /// The stabilized version of this intrinsic is /// [`f32::round_ties_even`](../../std/primitive.f32.html#method.round_ties_even) +#[rustc_intrinsic_const_stable_indirect] #[rustc_intrinsic] #[rustc_nounwind] pub const fn round_ties_even_f32(x: f32) -> f32; @@ -1491,6 +1505,7 @@ pub const fn round_ties_even_f32(x: f32) -> f32; /// /// The stabilized version of this intrinsic is /// [`f64::round_ties_even`](../../std/primitive.f64.html#method.round_ties_even) +#[rustc_intrinsic_const_stable_indirect] #[rustc_intrinsic] #[rustc_nounwind] pub const fn round_ties_even_f64(x: f64) -> f64; @@ -1500,6 +1515,7 @@ pub const fn round_ties_even_f64(x: f64) -> f64; /// /// The stabilized version of this intrinsic is /// [`f128::round_ties_even`](../../std/primitive.f128.html#method.round_ties_even) +#[rustc_intrinsic_const_stable_indirect] #[rustc_intrinsic] #[rustc_nounwind] pub const fn round_ties_even_f128(x: f128) -> f128; @@ -1508,6 +1524,7 @@ pub const fn round_ties_even_f128(x: f128) -> f128; /// /// The stabilized version of this intrinsic is /// [`f16::round`](../../std/primitive.f16.html#method.round) +#[rustc_intrinsic_const_stable_indirect] #[rustc_intrinsic] #[rustc_nounwind] pub const unsafe fn roundf16(x: f16) -> f16; @@ -1515,6 +1532,7 @@ pub const unsafe fn roundf16(x: f16) -> f16; /// /// The stabilized version of this intrinsic is /// [`f32::round`](../../std/primitive.f32.html#method.round) +#[rustc_intrinsic_const_stable_indirect] #[rustc_intrinsic] 
#[rustc_nounwind] pub const unsafe fn roundf32(x: f32) -> f32; @@ -1522,6 +1540,7 @@ pub const unsafe fn roundf32(x: f32) -> f32; /// /// The stabilized version of this intrinsic is /// [`f64::round`](../../std/primitive.f64.html#method.round) +#[rustc_intrinsic_const_stable_indirect] #[rustc_intrinsic] #[rustc_nounwind] pub const unsafe fn roundf64(x: f64) -> f64; @@ -1529,6 +1548,7 @@ pub const unsafe fn roundf64(x: f64) -> f64; /// /// The stabilized version of this intrinsic is /// [`f128::round`](../../std/primitive.f128.html#method.round) +#[rustc_intrinsic_const_stable_indirect] #[rustc_intrinsic] #[rustc_nounwind] pub const unsafe fn roundf128(x: f128) -> f128; @@ -2214,6 +2234,7 @@ pub const unsafe fn raw_eq(a: &T, b: &T) -> bool; /// [valid]: crate::ptr#safety #[rustc_nounwind] #[rustc_intrinsic] +#[rustc_const_unstable(feature = "const_cmp", issue = "143800")] pub const unsafe fn compare_bytes(left: *const u8, right: *const u8, bytes: usize) -> i32; /// See documentation of [`std::hint::black_box`] for details. @@ -2546,6 +2567,15 @@ pub const unsafe fn const_deallocate(_ptr: *mut u8, _size: usize, _align: usize) // Runtime NOP } +#[rustc_const_unstable(feature = "const_heap", issue = "79597")] +#[rustc_nounwind] +#[rustc_intrinsic] +#[miri::intrinsic_fallback_is_spec] +pub const unsafe fn const_make_global(ptr: *mut u8) -> *const u8 { + // const eval overrides this function; at runtime, it is a NOP. + ptr +} + /// Returns whether we should perform contract-checking at runtime. /// /// This is meant to be similar to the ub_checks intrinsic, in terms diff --git a/library/core/src/io/borrowed_buf.rs b/library/core/src/io/borrowed_buf.rs index 854e03cf18279..088dea7812945 100644 --- a/library/core/src/io/borrowed_buf.rs +++ b/library/core/src/io/borrowed_buf.rs @@ -69,6 +69,23 @@ impl<'data> From<&'data mut [MaybeUninit]> for BorrowedBuf<'data> { } } +/// Creates a new `BorrowedBuf` from a cursor. +/// +/// Use `BorrowedCursor::with_unfilled_buf` instead for a safer alternative. +impl<'data> From> for BorrowedBuf<'data> { + #[inline] + fn from(mut buf: BorrowedCursor<'data>) -> BorrowedBuf<'data> { + let init = buf.init_mut().len(); + BorrowedBuf { + // SAFETY: no initialized byte is ever uninitialized as per + // `BorrowedBuf`'s invariant + buf: unsafe { buf.buf.buf.get_unchecked_mut(buf.buf.filled..) }, + filled: 0, + init, + } + } +} + impl<'data> BorrowedBuf<'data> { /// Returns the total capacity of the buffer. #[inline] @@ -132,7 +149,6 @@ impl<'data> BorrowedBuf<'data> { #[inline] pub fn unfilled<'this>(&'this mut self) -> BorrowedCursor<'this> { BorrowedCursor { - start: self.filled, // SAFETY: we never assign into `BorrowedCursor::buf`, so treating its // lifetime covariantly is safe. buf: unsafe { @@ -188,9 +204,6 @@ pub struct BorrowedCursor<'a> { // we create a `BorrowedCursor`. This is only safe if we never replace `buf` by assigning into // it, so don't do that! buf: &'a mut BorrowedBuf<'a>, - /// The length of the filled portion of the underlying buffer at the time of the cursor's - /// creation. - start: usize, } impl<'a> BorrowedCursor<'a> { @@ -208,7 +221,6 @@ impl<'a> BorrowedCursor<'a> { self.buf, ) }, - start: self.start, } } @@ -218,23 +230,12 @@ impl<'a> BorrowedCursor<'a> { self.buf.capacity() - self.buf.filled } - /// Returns the number of bytes written to this cursor since it was created from a `BorrowedBuf`. + /// Returns the number of bytes written to the `BorrowedBuf` this cursor was created from. 
/// - /// Note that if this cursor is a reborrowed clone of another, then the count returned is the - /// count written via either cursor, not the count since the cursor was reborrowed. + /// In particular, the count returned is shared by all reborrows of the cursor. #[inline] pub fn written(&self) -> usize { - self.buf.filled - self.start - } - - /// Returns a shared reference to the initialized portion of the cursor. - #[inline] - pub fn init_ref(&self) -> &[u8] { - // SAFETY: We only slice the initialized part of the buffer, which is always valid - unsafe { - let buf = self.buf.buf.get_unchecked(self.buf.filled..self.buf.init); - buf.assume_init_ref() - } + self.buf.filled } /// Returns a mutable reference to the initialized portion of the cursor. @@ -247,15 +248,6 @@ impl<'a> BorrowedCursor<'a> { } } - /// Returns a mutable reference to the uninitialized part of the cursor. - /// - /// It is safe to uninitialize any of these bytes. - #[inline] - pub fn uninit_mut(&mut self) -> &mut [MaybeUninit] { - // SAFETY: always in bounds - unsafe { self.buf.buf.get_unchecked_mut(self.buf.init..) } - } - /// Returns a mutable reference to the whole cursor. /// /// # Safety @@ -308,7 +300,9 @@ impl<'a> BorrowedCursor<'a> { /// Initializes all bytes in the cursor. #[inline] pub fn ensure_init(&mut self) -> &mut Self { - let uninit = self.uninit_mut(); + // SAFETY: always in bounds and we never uninitialize these bytes. + let uninit = unsafe { self.buf.buf.get_unchecked_mut(self.buf.init..) }; + // SAFETY: 0 is a valid value for MaybeUninit and the length matches the allocation // since it is comes from a slice reference. unsafe { @@ -353,4 +347,38 @@ impl<'a> BorrowedCursor<'a> { } self.buf.filled += buf.len(); } + + /// Runs the given closure with a `BorrowedBuf` containing the unfilled part + /// of the cursor. + /// + /// This enables inspecting what was written to the cursor. + /// + /// # Panics + /// + /// Panics if the `BorrowedBuf` given to the closure is replaced by another + /// one. + pub fn with_unfilled_buf(&mut self, f: impl FnOnce(&mut BorrowedBuf<'_>) -> T) -> T { + let mut buf = BorrowedBuf::from(self.reborrow()); + let prev_ptr = buf.buf as *const _; + let res = f(&mut buf); + + // Check that the caller didn't replace the `BorrowedBuf`. + // This is necessary for the safety of the code below: if the check wasn't + // there, one could mark some bytes as initialized even though there aren't. + assert!(core::ptr::addr_eq(prev_ptr, buf.buf)); + + let filled = buf.filled; + let init = buf.init; + + // Update `init` and `filled` fields with what was written to the buffer. + // `self.buf.filled` was the starting length of the `BorrowedBuf`. + // + // SAFETY: These amounts of bytes were initialized/filled in the `BorrowedBuf`, + // and therefore they are initialized/filled in the cursor too, because the + // buffer wasn't replaced. + self.buf.init = self.buf.filled + init; + self.buf.filled += filled; + + res + } } diff --git a/library/core/src/iter/adapters/zip.rs b/library/core/src/iter/adapters/zip.rs index 8090c98e7edb7..c5e199c30821d 100644 --- a/library/core/src/iter/adapters/zip.rs +++ b/library/core/src/iter/adapters/zip.rs @@ -18,7 +18,6 @@ pub struct Zip { // index, len and a_len are only used by the specialized version of zip index: usize, len: usize, - a_len: usize, } impl Zip { pub(in crate::iter) fn new(a: A, b: B) -> Zip { @@ -158,7 +157,6 @@ macro_rules! 
zip_impl_general_defaults { b, index: 0, // unused len: 0, // unused - a_len: 0, // unused } } @@ -299,9 +297,8 @@ where B: TrustedRandomAccess + Iterator, { fn new(a: A, b: B) -> Self { - let a_len = a.size(); - let len = cmp::min(a_len, b.size()); - Zip { a, b, index: 0, len, a_len } + let len = cmp::min(a.size(), b.size()); + Zip { a, b, index: 0, len } } #[inline] @@ -315,17 +312,6 @@ where unsafe { Some((self.a.__iterator_get_unchecked(i), self.b.__iterator_get_unchecked(i))) } - } else if A::MAY_HAVE_SIDE_EFFECT && self.index < self.a_len { - let i = self.index; - // as above, increment before executing code that may panic - self.index += 1; - self.len += 1; - // match the base implementation's potential side effects - // SAFETY: we just checked that `i` < `self.a.len()` - unsafe { - self.a.__iterator_get_unchecked(i); - } - None } else { None } @@ -371,36 +357,42 @@ where A: DoubleEndedIterator + ExactSizeIterator, B: DoubleEndedIterator + ExactSizeIterator, { - if A::MAY_HAVE_SIDE_EFFECT || B::MAY_HAVE_SIDE_EFFECT { - let sz_a = self.a.size(); - let sz_b = self.b.size(); - // Adjust a, b to equal length, make sure that only the first call - // of `next_back` does this, otherwise we will break the restriction - // on calls to `self.next_back()` after calling `get_unchecked()`. - if sz_a != sz_b { + // No effects when the iterator is exhausted, to reduce the number of + // cases the unsafe code has to handle. + // See #137255 for a case where where too many epicycles lead to unsoundness. + if self.index < self.len { + let old_len = self.len; + + // since get_unchecked and the side-effecting code can execute user code + // which can panic we decrement the counter beforehand + // so that the same index won't be accessed twice, as required by TrustedRandomAccess. + // Additionally this will ensure that the side-effects code won't run a second time. + self.len -= 1; + + // Adjust a, b to equal length if we're iterating backwards. + if A::MAY_HAVE_SIDE_EFFECT || B::MAY_HAVE_SIDE_EFFECT { + // note if some forward-iteration already happened then these aren't the real + // remaining lengths of the inner iterators, so we have to relate them to + // Zip's internal length-tracking. let sz_a = self.a.size(); - if A::MAY_HAVE_SIDE_EFFECT && sz_a > self.len { - for _ in 0..sz_a - self.len { - // since next_back() may panic we increment the counters beforehand - // to keep Zip's state in sync with the underlying iterator source - self.a_len -= 1; - self.a.next_back(); - } - debug_assert_eq!(self.a_len, self.len); - } let sz_b = self.b.size(); - if B::MAY_HAVE_SIDE_EFFECT && sz_b > self.len { - for _ in 0..sz_b - self.len { - self.b.next_back(); + // This condition can and must only be true on the first `next_back` call, + // otherwise we will break the restriction on calls to `self.next_back()` + // after calling `get_unchecked()`. 
+ if sz_a != sz_b && (old_len == sz_a || old_len == sz_b) { + if A::MAY_HAVE_SIDE_EFFECT && sz_a > old_len { + for _ in 0..sz_a - old_len { + self.a.next_back(); + } } + if B::MAY_HAVE_SIDE_EFFECT && sz_b > old_len { + for _ in 0..sz_b - old_len { + self.b.next_back(); + } + } + debug_assert_eq!(self.a.size(), self.b.size()); } } - } - if self.index < self.len { - // since get_unchecked executes code which can panic we increment the counters beforehand - // so that the same index won't be accessed twice, as required by TrustedRandomAccess - self.len -= 1; - self.a_len -= 1; let i = self.len; // SAFETY: `i` is smaller than the previous value of `self.len`, // which is also smaller than or equal to `self.a.len()` and `self.b.len()` diff --git a/library/core/src/iter/sources/empty.rs b/library/core/src/iter/sources/empty.rs index 309962ab70ce1..1844c76e5df7e 100644 --- a/library/core/src/iter/sources/empty.rs +++ b/library/core/src/iter/sources/empty.rs @@ -81,7 +81,7 @@ impl Clone for Empty { // not #[derive] because that adds a Default bound on T, // which isn't necessary. #[stable(feature = "iter_empty", since = "1.2.0")] -#[rustc_const_unstable(feature = "const_default", issue = "67792")] +#[rustc_const_unstable(feature = "const_default", issue = "143894")] impl const Default for Empty { fn default() -> Empty { Empty(marker::PhantomData) diff --git a/library/core/src/iter/traits/collect.rs b/library/core/src/iter/traits/collect.rs index 3bc9cff8072bf..ab27650067980 100644 --- a/library/core/src/iter/traits/collect.rs +++ b/library/core/src/iter/traits/collect.rs @@ -436,7 +436,6 @@ pub trait Extend { /// **For implementors:** For a collection to unsafely rely on this method's safety precondition (that is, /// invoke UB if they are violated), it must implement `extend_reserve` correctly. In other words, /// callers may assume that if they `extend_reserve`ed enough space they can call this method. - // This method is for internal usage only. It is only on the trait because of specialization's limitations. #[unstable(feature = "extend_one_unchecked", issue = "none")] #[doc(hidden)] diff --git a/library/core/src/iter/traits/iterator.rs b/library/core/src/iter/traits/iterator.rs index f296792b1dcb2..10f9d464f7d7f 100644 --- a/library/core/src/iter/traits/iterator.rs +++ b/library/core/src/iter/traits/iterator.rs @@ -3414,10 +3414,10 @@ pub trait Iterator { /// ``` #[stable(feature = "iter_copied", since = "1.36.0")] #[rustc_diagnostic_item = "iter_copied"] - fn copied<'a, T: 'a>(self) -> Copied + fn copied<'a, T>(self) -> Copied where + T: Copy + 'a, Self: Sized + Iterator, - T: Copy, { Copied::new(self) } @@ -3462,10 +3462,10 @@ pub trait Iterator { /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[rustc_diagnostic_item = "iter_cloned"] - fn cloned<'a, T: 'a>(self) -> Cloned + fn cloned<'a, T>(self) -> Cloned where + T: Clone + 'a, Self: Sized + Iterator, - T: Clone, { Cloned::new(self) } diff --git a/library/core/src/lib.rs b/library/core/src/lib.rs index 66bfb757daff9..ef8b499773b03 100644 --- a/library/core/src/lib.rs +++ b/library/core/src/lib.rs @@ -39,9 +39,9 @@ //! return. You should mark your implementation using `#[panic_handler]`. //! //! * `rust_eh_personality` - is used by the failure mechanisms of the -//! compiler. This is often mapped to GCC's personality function, but crates -//! which do not trigger a panic can be assured that this function is never -//! called. The `lang` attribute is called `eh_personality`. +//! compiler. 
This is often mapped to GCC's personality function, but crates +//! which do not trigger a panic can be assured that this function is never +//! called. The `lang` attribute is called `eh_personality`. #![stable(feature = "core", since = "1.6.0")] #![doc( @@ -103,6 +103,7 @@ #![feature(cfg_select)] #![feature(cfg_target_has_reliable_f16_f128)] #![feature(const_carrying_mul_add)] +#![feature(const_cmp)] #![feature(const_destruct)] #![feature(const_eval_select)] #![feature(core_intrinsics)] @@ -146,6 +147,7 @@ #![feature(const_trait_impl)] #![feature(decl_macro)] #![feature(deprecated_suggestion)] +#![feature(derive_const)] #![feature(doc_cfg)] #![feature(doc_cfg_hide)] #![feature(doc_notable_trait)] @@ -160,6 +162,7 @@ #![feature(lang_items)] #![feature(link_llvm_intrinsics)] #![feature(macro_metavar_expr)] +#![feature(macro_metavar_expr_concat)] #![feature(marker_trait_attr)] #![feature(min_specialization)] #![feature(multiple_supertrait_upcastable)] diff --git a/library/core/src/macros/mod.rs b/library/core/src/macros/mod.rs index 6b9cbb0643586..8ac6ce2242d4e 100644 --- a/library/core/src/macros/mod.rs +++ b/library/core/src/macros/mod.rs @@ -196,16 +196,14 @@ pub macro assert_matches { }, } -/// A macro for defining `#[cfg]` match-like statements. +/// Selects code at compile-time based on `cfg` predicates. /// -/// It is similar to the `if/elif` C preprocessor macro by allowing definition of a cascade of -/// `#[cfg]` cases, emitting the implementation which matches first. +/// This macro evaluates, at compile-time, a series of `cfg` predicates, +/// selects the first that is true, and emits the code guarded by that +/// predicate. The code guarded by other predicates is not emitted. /// -/// This allows you to conveniently provide a long list `#[cfg]`'d blocks of code -/// without having to rewrite each clause multiple times. -/// -/// Trailing `_` wildcard match arms are **optional** and they indicate a fallback branch when -/// all previous declarations do not evaluate to true. +/// An optional trailing `_` wildcard can be used to specify a fallback. If +/// none of the predicates are true, a [`compile_error`] is emitted. /// /// # Example /// @@ -225,7 +223,7 @@ pub macro assert_matches { /// } /// ``` /// -/// If desired, it is possible to return expressions through the use of surrounding braces: +/// The `cfg_select!` macro can also be used in expression position: /// /// ``` /// #![feature(cfg_select)] @@ -1617,7 +1615,7 @@ pub(crate) mod builtin { /// See [the reference] for more info. 
/// /// [the reference]: ../../../reference/attributes/derive.html - #[unstable(feature = "derive_const", issue = "none")] + #[unstable(feature = "derive_const", issue = "118304")] #[rustc_builtin_macro] pub macro derive_const($item:item) { /* compiler built-in */ diff --git a/library/core/src/marker.rs b/library/core/src/marker.rs index 6531d3ec766a9..45277a1c82d3a 100644 --- a/library/core/src/marker.rs +++ b/library/core/src/marker.rs @@ -863,7 +863,7 @@ impl Clone for PhantomData { } #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_const_unstable(feature = "const_default", issue = "67792")] +#[rustc_const_unstable(feature = "const_default", issue = "143894")] impl const Default for PhantomData { fn default() -> Self { Self diff --git a/library/core/src/mem/maybe_uninit.rs b/library/core/src/mem/maybe_uninit.rs index fc35e54bb0dcc..34d8370da7ecb 100644 --- a/library/core/src/mem/maybe_uninit.rs +++ b/library/core/src/mem/maybe_uninit.rs @@ -1005,28 +1005,6 @@ impl MaybeUninit { } } - /// Deprecated version of [`slice::assume_init_ref`]. - #[unstable(feature = "maybe_uninit_slice", issue = "63569")] - #[deprecated( - note = "replaced by inherent assume_init_ref method; will eventually be removed", - since = "1.83.0" - )] - pub const unsafe fn slice_assume_init_ref(slice: &[Self]) -> &[T] { - // SAFETY: Same for both methods. - unsafe { slice.assume_init_ref() } - } - - /// Deprecated version of [`slice::assume_init_mut`]. - #[unstable(feature = "maybe_uninit_slice", issue = "63569")] - #[deprecated( - note = "replaced by inherent assume_init_mut method; will eventually be removed", - since = "1.83.0" - )] - pub const unsafe fn slice_assume_init_mut(slice: &mut [Self]) -> &mut [T] { - // SAFETY: Same for both methods. - unsafe { slice.assume_init_mut() } - } - /// Gets a pointer to the first element of the array. #[unstable(feature = "maybe_uninit_slice", issue = "63569")] #[inline(always)] @@ -1040,94 +1018,6 @@ impl MaybeUninit { pub const fn slice_as_mut_ptr(this: &mut [MaybeUninit]) -> *mut T { this.as_mut_ptr() as *mut T } - - /// Deprecated version of [`slice::write_copy_of_slice`]. - #[unstable(feature = "maybe_uninit_write_slice", issue = "79995")] - #[deprecated( - note = "replaced by inherent write_copy_of_slice method; will eventually be removed", - since = "1.83.0" - )] - pub fn copy_from_slice<'a>(this: &'a mut [MaybeUninit], src: &[T]) -> &'a mut [T] - where - T: Copy, - { - this.write_copy_of_slice(src) - } - - /// Deprecated version of [`slice::write_clone_of_slice`]. - #[unstable(feature = "maybe_uninit_write_slice", issue = "79995")] - #[deprecated( - note = "replaced by inherent write_clone_of_slice method; will eventually be removed", - since = "1.83.0" - )] - pub fn clone_from_slice<'a>(this: &'a mut [MaybeUninit], src: &[T]) -> &'a mut [T] - where - T: Clone, - { - this.write_clone_of_slice(src) - } - - /// Deprecated version of [`slice::write_filled`]. - #[unstable(feature = "maybe_uninit_fill", issue = "117428")] - #[deprecated( - note = "replaced by inherent write_filled method; will eventually be removed", - since = "1.83.0" - )] - pub fn fill<'a>(this: &'a mut [MaybeUninit], value: T) -> &'a mut [T] - where - T: Clone, - { - this.write_filled(value) - } - - /// Deprecated version of [`slice::write_with`]. 
- #[unstable(feature = "maybe_uninit_fill", issue = "117428")] - #[deprecated( - note = "replaced by inherent write_with method; will eventually be removed", - since = "1.83.0" - )] - pub fn fill_with<'a, F>(this: &'a mut [MaybeUninit], mut f: F) -> &'a mut [T] - where - F: FnMut() -> T, - { - this.write_with(|_| f()) - } - - /// Deprecated version of [`slice::write_iter`]. - #[unstable(feature = "maybe_uninit_fill", issue = "117428")] - #[deprecated( - note = "replaced by inherent write_iter method; will eventually be removed", - since = "1.83.0" - )] - pub fn fill_from<'a, I>( - this: &'a mut [MaybeUninit], - it: I, - ) -> (&'a mut [T], &'a mut [MaybeUninit]) - where - I: IntoIterator, - { - this.write_iter(it) - } - - /// Deprecated version of [`slice::as_bytes`]. - #[unstable(feature = "maybe_uninit_as_bytes", issue = "93092")] - #[deprecated( - note = "replaced by inherent as_bytes method; will eventually be removed", - since = "1.83.0" - )] - pub fn slice_as_bytes(this: &[MaybeUninit]) -> &[MaybeUninit] { - this.as_bytes() - } - - /// Deprecated version of [`slice::as_bytes_mut`]. - #[unstable(feature = "maybe_uninit_as_bytes", issue = "93092")] - #[deprecated( - note = "replaced by inherent as_bytes_mut method; will eventually be removed", - since = "1.83.0" - )] - pub fn slice_as_bytes_mut(this: &mut [MaybeUninit]) -> &mut [MaybeUninit] { - this.as_bytes_mut() - } } impl [MaybeUninit] { @@ -1304,7 +1194,7 @@ impl [MaybeUninit] { /// Fills a slice with elements returned by calling a closure for each index. /// /// This method uses a closure to create new values. If you'd rather `Clone` a given value, use - /// [`MaybeUninit::fill`]. If you want to use the `Default` trait to generate values, you can + /// [slice::write_filled]. If you want to use the `Default` trait to generate values, you can /// pass [`|_| Default::default()`][Default::default] as the argument. /// /// # Panics @@ -1463,7 +1353,7 @@ impl [MaybeUninit] { /// use std::mem::MaybeUninit; /// /// let mut uninit = [MaybeUninit::::uninit(), MaybeUninit::::uninit()]; - /// let uninit_bytes = MaybeUninit::slice_as_bytes_mut(&mut uninit); + /// let uninit_bytes = uninit.as_bytes_mut(); /// uninit_bytes.write_copy_of_slice(&[0x12, 0x34, 0x56, 0x78]); /// let vals = unsafe { uninit.assume_init_ref() }; /// if cfg!(target_endian = "little") { diff --git a/library/core/src/mem/mod.rs b/library/core/src/mem/mod.rs index 8fb1010fd2014..12113c227131c 100644 --- a/library/core/src/mem/mod.rs +++ b/library/core/src/mem/mod.rs @@ -38,7 +38,7 @@ pub use crate::intrinsics::transmute; /// * If you want to leak memory, see [`Box::leak`]. /// * If you want to obtain a raw pointer to the memory, see [`Box::into_raw`]. /// * If you want to dispose of a value properly, running its destructor, see -/// [`mem::drop`]. +/// [`mem::drop`]. 
/// /// # Safety /// diff --git a/library/core/src/net/ip_addr.rs b/library/core/src/net/ip_addr.rs index aaa68e8d7d1ad..49a7ae5de5c8f 100644 --- a/library/core/src/net/ip_addr.rs +++ b/library/core/src/net/ip_addr.rs @@ -787,7 +787,6 @@ impl Ipv4Addr { /// [IANA IPv4 Special-Purpose Address Registry]: https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml /// [unspecified address]: Ipv4Addr::UNSPECIFIED /// [broadcast address]: Ipv4Addr::BROADCAST - /// /// # Examples /// diff --git a/library/core/src/num/f128.rs b/library/core/src/num/f128.rs index e4c846ec65a25..8b1e19f6a048d 100644 --- a/library/core/src/num/f128.rs +++ b/library/core/src/num/f128.rs @@ -1456,7 +1456,6 @@ impl f128 { #[rustc_allow_incoherent_impl] #[unstable(feature = "f128", issue = "116909")] #[rustc_const_unstable(feature = "f128", issue = "116909")] - // #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] pub const fn floor(self) -> f128 { // SAFETY: intrinsic with no preconditions @@ -1486,7 +1485,6 @@ impl f128 { #[rustc_allow_incoherent_impl] #[unstable(feature = "f128", issue = "116909")] #[rustc_const_unstable(feature = "f128", issue = "116909")] - // #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] pub const fn ceil(self) -> f128 { // SAFETY: intrinsic with no preconditions @@ -1522,7 +1520,6 @@ impl f128 { #[rustc_allow_incoherent_impl] #[unstable(feature = "f128", issue = "116909")] #[rustc_const_unstable(feature = "f128", issue = "116909")] - // #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] pub const fn round(self) -> f128 { // SAFETY: intrinsic with no preconditions @@ -1556,7 +1553,6 @@ impl f128 { #[rustc_allow_incoherent_impl] #[unstable(feature = "f128", issue = "116909")] #[rustc_const_unstable(feature = "f128", issue = "116909")] - // #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] pub const fn round_ties_even(self) -> f128 { intrinsics::round_ties_even_f128(self) @@ -1588,7 +1584,6 @@ impl f128 { #[rustc_allow_incoherent_impl] #[unstable(feature = "f128", issue = "116909")] #[rustc_const_unstable(feature = "f128", issue = "116909")] - // #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] pub const fn trunc(self) -> f128 { // SAFETY: intrinsic with no preconditions @@ -1619,7 +1614,6 @@ impl f128 { #[rustc_allow_incoherent_impl] #[unstable(feature = "f128", issue = "116909")] #[rustc_const_unstable(feature = "f128", issue = "116909")] - // #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] pub const fn fract(self) -> f128 { self - self.trunc() diff --git a/library/core/src/num/f16.rs b/library/core/src/num/f16.rs index 022aa659534c5..cbae92205c2ba 100644 --- a/library/core/src/num/f16.rs +++ b/library/core/src/num/f16.rs @@ -1431,7 +1431,6 @@ impl f16 { #[rustc_allow_incoherent_impl] #[unstable(feature = "f16", issue = "116909")] #[rustc_const_unstable(feature = "f16", issue = "116909")] 
- // #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] pub const fn floor(self) -> f16 { // SAFETY: intrinsic with no preconditions @@ -1461,7 +1460,6 @@ impl f16 { #[rustc_allow_incoherent_impl] #[unstable(feature = "f16", issue = "116909")] #[rustc_const_unstable(feature = "f16", issue = "116909")] - // #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] pub const fn ceil(self) -> f16 { // SAFETY: intrinsic with no preconditions @@ -1497,7 +1495,6 @@ impl f16 { #[rustc_allow_incoherent_impl] #[unstable(feature = "f16", issue = "116909")] #[rustc_const_unstable(feature = "f16", issue = "116909")] - // #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] pub const fn round(self) -> f16 { // SAFETY: intrinsic with no preconditions @@ -1531,7 +1528,6 @@ impl f16 { #[rustc_allow_incoherent_impl] #[unstable(feature = "f16", issue = "116909")] #[rustc_const_unstable(feature = "f16", issue = "116909")] - // #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] pub const fn round_ties_even(self) -> f16 { intrinsics::round_ties_even_f16(self) @@ -1563,7 +1559,6 @@ impl f16 { #[rustc_allow_incoherent_impl] #[unstable(feature = "f16", issue = "116909")] #[rustc_const_unstable(feature = "f16", issue = "116909")] - // #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] pub const fn trunc(self) -> f16 { // SAFETY: intrinsic with no preconditions @@ -1594,7 +1589,6 @@ impl f16 { #[rustc_allow_incoherent_impl] #[unstable(feature = "f16", issue = "116909")] #[rustc_const_unstable(feature = "f16", issue = "116909")] - // #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] pub const fn fract(self) -> f16 { self - self.trunc() diff --git a/library/core/src/num/f32.rs b/library/core/src/num/f32.rs index b06ac75104fb7..7a6a0b3d5ce43 100644 --- a/library/core/src/num/f32.rs +++ b/library/core/src/num/f32.rs @@ -1599,7 +1599,6 @@ pub mod math { /// [`f32::floor`]: ../../../std/primitive.f32.html#method.floor #[inline] #[unstable(feature = "core_float_math", issue = "137578")] - #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] pub const fn floor(x: f32) -> f32 { // SAFETY: intrinsic with no preconditions @@ -1630,7 +1629,6 @@ pub mod math { #[doc(alias = "ceiling")] #[must_use = "method returns a new number and does not mutate the original value"] #[unstable(feature = "core_float_math", issue = "137578")] - #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] pub const fn ceil(x: f32) -> f32 { // SAFETY: intrinsic with no preconditions unsafe { intrinsics::ceilf32(x) } @@ -1665,7 +1663,6 @@ pub mod math { #[inline] #[unstable(feature = "core_float_math", issue = "137578")] #[must_use = "method returns a new number and does not mutate the original value"] - #[rustc_const_unstable(feature = 
"const_float_round_methods", issue = "141555")] pub const fn round(x: f32) -> f32 { // SAFETY: intrinsic with no preconditions unsafe { intrinsics::roundf32(x) } @@ -1699,7 +1696,6 @@ pub mod math { #[inline] #[unstable(feature = "core_float_math", issue = "137578")] #[must_use = "method returns a new number and does not mutate the original value"] - #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] pub const fn round_ties_even(x: f32) -> f32 { intrinsics::round_ties_even_f32(x) } @@ -1730,7 +1726,6 @@ pub mod math { #[doc(alias = "truncate")] #[must_use = "method returns a new number and does not mutate the original value"] #[unstable(feature = "core_float_math", issue = "137578")] - #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] pub const fn trunc(x: f32) -> f32 { // SAFETY: intrinsic with no preconditions unsafe { intrinsics::truncf32(x) } @@ -1760,7 +1755,6 @@ pub mod math { /// [`f32::fract`]: ../../../std/primitive.f32.html#method.fract #[inline] #[unstable(feature = "core_float_math", issue = "137578")] - #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] pub const fn fract(x: f32) -> f32 { x - trunc(x) diff --git a/library/core/src/num/f64.rs b/library/core/src/num/f64.rs index 0a8a767858717..5c3e62262af6c 100644 --- a/library/core/src/num/f64.rs +++ b/library/core/src/num/f64.rs @@ -1597,7 +1597,6 @@ pub mod math { /// [`f64::floor`]: ../../../std/primitive.f64.html#method.floor #[inline] #[unstable(feature = "core_float_math", issue = "137578")] - #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] pub const fn floor(x: f64) -> f64 { // SAFETY: intrinsic with no preconditions @@ -1627,7 +1626,6 @@ pub mod math { #[inline] #[doc(alias = "ceiling")] #[unstable(feature = "core_float_math", issue = "137578")] - #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] pub const fn ceil(x: f64) -> f64 { // SAFETY: intrinsic with no preconditions @@ -1662,7 +1660,6 @@ pub mod math { /// [`f64::round`]: ../../../std/primitive.f64.html#method.round #[inline] #[unstable(feature = "core_float_math", issue = "137578")] - #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] pub const fn round(x: f64) -> f64 { // SAFETY: intrinsic with no preconditions @@ -1696,7 +1693,6 @@ pub mod math { /// [`f64::round_ties_even`]: ../../../std/primitive.f64.html#method.round_ties_even #[inline] #[unstable(feature = "core_float_math", issue = "137578")] - #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] pub const fn round_ties_even(x: f64) -> f64 { intrinsics::round_ties_even_f64(x) @@ -1727,7 +1723,6 @@ pub mod math { #[inline] #[doc(alias = "truncate")] #[unstable(feature = "core_float_math", issue = "137578")] - #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] pub const fn trunc(x: f64) -> f64 { // SAFETY: intrinsic with no preconditions @@ -1758,7 +1753,6 @@ pub mod math { /// 
[`f64::fract`]: ../../../std/primitive.f64.html#method.fract #[inline] #[unstable(feature = "core_float_math", issue = "137578")] - #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] pub const fn fract(x: f64) -> f64 { x - trunc(x) diff --git a/library/core/src/num/int_log10.rs b/library/core/src/num/int_log10.rs index 28a3f5d880ad7..649a736b6e7b5 100644 --- a/library/core/src/num/int_log10.rs +++ b/library/core/src/num/int_log10.rs @@ -1,5 +1,5 @@ -/// These functions compute the integer logarithm of their type, assuming -/// that someone has already checked that the value is strictly positive. +//! These functions compute the integer logarithm of their type, assuming +//! that someone has already checked that the value is strictly positive. // 0 < val <= u8::MAX #[inline] diff --git a/library/core/src/num/nonzero.rs b/library/core/src/num/nonzero.rs index f26aeaef249a6..56a4ac621b6ea 100644 --- a/library/core/src/num/nonzero.rs +++ b/library/core/src/num/nonzero.rs @@ -204,9 +204,10 @@ impl UseCloned for NonZero where T: ZeroablePrimitive {} impl Copy for NonZero where T: ZeroablePrimitive {} #[stable(feature = "nonzero", since = "1.28.0")] -impl PartialEq for NonZero +#[rustc_const_unstable(feature = "const_cmp", issue = "143800")] +impl const PartialEq for NonZero where - T: ZeroablePrimitive + PartialEq, + T: ZeroablePrimitive + ~const PartialEq, { #[inline] fn eq(&self, other: &Self) -> bool { diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs index eb3de36512198..2d4c1067703a7 100644 --- a/library/core/src/num/uint_macros.rs +++ b/library/core/src/num/uint_macros.rs @@ -556,7 +556,7 @@ macro_rules! uint_impl { pub const fn strict_add(self, rhs: Self) -> Self { let (a, b) = self.overflowing_add(rhs); if b { overflow_panic::add() } else { a } - } + } /// Unchecked integer addition. Computes `self + rhs`, assuming overflow /// cannot occur. @@ -654,7 +654,7 @@ macro_rules! uint_impl { pub const fn strict_add_signed(self, rhs: $SignedT) -> Self { let (a, b) = self.overflowing_add_signed(rhs); if b { overflow_panic::add() } else { a } - } + } /// Checked integer subtraction. Computes `self - rhs`, returning /// `None` if overflow occurred. @@ -714,7 +714,7 @@ macro_rules! uint_impl { pub const fn strict_sub(self, rhs: Self) -> Self { let (a, b) = self.overflowing_sub(rhs); if b { overflow_panic::sub() } else { a } - } + } /// Unchecked integer subtraction. Computes `self - rhs`, assuming overflow /// cannot occur. @@ -807,6 +807,43 @@ macro_rules! uint_impl { } } + /// Strict subtraction with a signed integer. Computes `self - rhs`, + /// panicking if overflow occurred. + /// + /// # Panics + /// + /// ## Overflow behavior + /// + /// This function will always panic on overflow, regardless of whether overflow checks are enabled. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(strict_overflow_ops)] + #[doc = concat!("assert_eq!(3", stringify!($SelfT), ".strict_sub_signed(2), 1);")] + /// ``` + /// + /// The following panic because of overflow: + /// + /// ```should_panic + /// #![feature(strict_overflow_ops)] + #[doc = concat!("let _ = 1", stringify!($SelfT), ".strict_sub_signed(2);")] + /// ``` + /// + /// ```should_panic + /// #![feature(strict_overflow_ops)] + #[doc = concat!("let _ = (", stringify!($SelfT), "::MAX).strict_sub_signed(-1);")] + /// ``` + #[unstable(feature = "strict_overflow_ops", issue = "118260")] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + #[track_caller] + pub const fn strict_sub_signed(self, rhs: $SignedT) -> Self { + let (a, b) = self.overflowing_sub_signed(rhs); + if b { overflow_panic::sub() } else { a } + } + #[doc = concat!( "Checked integer subtraction. Computes `self - rhs` and checks if the result fits into an [`", stringify!($SignedT), "`], returning `None` if overflow occurred." @@ -915,7 +952,7 @@ macro_rules! uint_impl { pub const fn strict_mul(self, rhs: Self) -> Self { let (a, b) = self.overflowing_mul(rhs); if b { overflow_panic::mul() } else { a } - } + } /// Unchecked integer multiplication. Computes `self * rhs`, assuming overflow /// cannot occur. diff --git a/library/core/src/num/wrapping.rs b/library/core/src/num/wrapping.rs index 491845f6ef8f4..d696cc9095249 100644 --- a/library/core/src/num/wrapping.rs +++ b/library/core/src/num/wrapping.rs @@ -104,9 +104,9 @@ macro_rules! sh_impl_signed { #[inline] fn shl(self, other: $f) -> Wrapping<$t> { if other < 0 { - Wrapping(self.0.wrapping_shr((-other & self::shift_max::$t as $f) as u32)) + Wrapping(self.0.wrapping_shr(-other as u32)) } else { - Wrapping(self.0.wrapping_shl((other & self::shift_max::$t as $f) as u32)) + Wrapping(self.0.wrapping_shl(other as u32)) } } } @@ -129,9 +129,9 @@ macro_rules! sh_impl_signed { #[inline] fn shr(self, other: $f) -> Wrapping<$t> { if other < 0 { - Wrapping(self.0.wrapping_shl((-other & self::shift_max::$t as $f) as u32)) + Wrapping(self.0.wrapping_shl(-other as u32)) } else { - Wrapping(self.0.wrapping_shr((other & self::shift_max::$t as $f) as u32)) + Wrapping(self.0.wrapping_shr(other as u32)) } } } @@ -157,7 +157,7 @@ macro_rules! sh_impl_unsigned { #[inline] fn shl(self, other: $f) -> Wrapping<$t> { - Wrapping(self.0.wrapping_shl((other & self::shift_max::$t as $f) as u32)) + Wrapping(self.0.wrapping_shl(other as u32)) } } forward_ref_binop! { impl Shl, shl for Wrapping<$t>, $f, @@ -178,7 +178,7 @@ macro_rules! sh_impl_unsigned { #[inline] fn shr(self, other: $f) -> Wrapping<$t> { - Wrapping(self.0.wrapping_shr((other & self::shift_max::$t as $f) as u32)) + Wrapping(self.0.wrapping_shr(other as u32)) } } forward_ref_binop! { impl Shr, shr for Wrapping<$t>, $f, @@ -1062,39 +1062,3 @@ macro_rules! wrapping_int_impl_unsigned { } wrapping_int_impl_unsigned! 
{ usize u8 u16 u32 u64 u128 } - -mod shift_max { - #![allow(non_upper_case_globals)] - - #[cfg(target_pointer_width = "16")] - mod platform { - pub(crate) const usize: u32 = super::u16; - pub(crate) const isize: u32 = super::i16; - } - - #[cfg(target_pointer_width = "32")] - mod platform { - pub(crate) const usize: u32 = super::u32; - pub(crate) const isize: u32 = super::i32; - } - - #[cfg(target_pointer_width = "64")] - mod platform { - pub(crate) const usize: u32 = super::u64; - pub(crate) const isize: u32 = super::i64; - } - - pub(super) const i8: u32 = (1 << 3) - 1; - pub(super) const i16: u32 = (1 << 4) - 1; - pub(super) const i32: u32 = (1 << 5) - 1; - pub(super) const i64: u32 = (1 << 6) - 1; - pub(super) const i128: u32 = (1 << 7) - 1; - pub(super) use self::platform::isize; - - pub(super) const u8: u32 = i8; - pub(super) const u16: u32 = i16; - pub(super) const u32: u32 = i32; - pub(super) const u64: u32 = i64; - pub(super) const u128: u32 = i128; - pub(super) use self::platform::usize; -} diff --git a/library/core/src/ops/function.rs b/library/core/src/ops/function.rs index 763b60d88e510..efc751a094d12 100644 --- a/library/core/src/ops/function.rs +++ b/library/core/src/ops/function.rs @@ -73,7 +73,7 @@ use crate::marker::Tuple; #[fundamental] // so that regex can rely that `&str: !FnMut` #[must_use = "closures are lazy and do nothing unless called"] #[const_trait] -#[rustc_const_unstable(feature = "const_trait_impl", issue = "67792")] +#[rustc_const_unstable(feature = "const_trait_impl", issue = "143874")] pub trait Fn: FnMut { /// Performs the call operation. #[unstable(feature = "fn_traits", issue = "29625")] @@ -161,7 +161,7 @@ pub trait Fn: FnMut { #[fundamental] // so that regex can rely that `&str: !FnMut` #[must_use = "closures are lazy and do nothing unless called"] #[const_trait] -#[rustc_const_unstable(feature = "const_trait_impl", issue = "67792")] +#[rustc_const_unstable(feature = "const_trait_impl", issue = "143874")] pub trait FnMut: FnOnce { /// Performs the call operation. #[unstable(feature = "fn_traits", issue = "29625")] @@ -241,7 +241,7 @@ pub trait FnMut: FnOnce { #[fundamental] // so that regex can rely that `&str: !FnMut` #[must_use = "closures are lazy and do nothing unless called"] #[const_trait] -#[rustc_const_unstable(feature = "const_trait_impl", issue = "67792")] +#[rustc_const_unstable(feature = "const_trait_impl", issue = "143874")] pub trait FnOnce { /// The returned type after the call operator is used. 
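Looking back at the `Wrapping` shift hunks above: the hand-rolled `shift_max` masking can go away because `wrapping_shl`/`wrapping_shr` already reduce the shift amount modulo the bit width of the type, so the observable behavior is unchanged. A small stand-alone illustration on stable Rust (the values are arbitrary):

```rust
use std::num::Wrapping;

fn main() {
    // The shift amount wraps modulo the bit width (8 for u8):
    // shifting by 8 is shifting by 0, shifting by 9 is shifting by 1.
    assert_eq!(Wrapping(1u8) << 8, Wrapping(1));
    assert_eq!(Wrapping(1u8) << 9, Wrapping(2));
    // Bits shifted out of the value are still discarded as usual.
    assert_eq!(Wrapping(0b1000_0000u8) << 1, Wrapping(0));
}
```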
#[lang = "fn_once_output"] @@ -257,7 +257,7 @@ mod impls { use crate::marker::Tuple; #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_trait_impl", issue = "67792")] + #[rustc_const_unstable(feature = "const_trait_impl", issue = "143874")] impl const Fn for &F where F: ~const Fn, @@ -268,7 +268,7 @@ mod impls { } #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_trait_impl", issue = "67792")] + #[rustc_const_unstable(feature = "const_trait_impl", issue = "143874")] impl const FnMut for &F where F: ~const Fn, @@ -279,7 +279,7 @@ mod impls { } #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_trait_impl", issue = "67792")] + #[rustc_const_unstable(feature = "const_trait_impl", issue = "143874")] impl const FnOnce for &F where F: ~const Fn, @@ -292,7 +292,7 @@ mod impls { } #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_trait_impl", issue = "67792")] + #[rustc_const_unstable(feature = "const_trait_impl", issue = "143874")] impl const FnMut for &mut F where F: ~const FnMut, @@ -303,7 +303,7 @@ mod impls { } #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_trait_impl", issue = "67792")] + #[rustc_const_unstable(feature = "const_trait_impl", issue = "143874")] impl const FnOnce for &mut F where F: ~const FnMut, diff --git a/library/core/src/ops/index.rs b/library/core/src/ops/index.rs index 8092fa9eb2fc5..d8489e9a94913 100644 --- a/library/core/src/ops/index.rs +++ b/library/core/src/ops/index.rs @@ -55,6 +55,8 @@ #[doc(alias = "]")] #[doc(alias = "[")] #[doc(alias = "[]")] +#[const_trait] +#[rustc_const_unstable(feature = "const_index", issue = "143775")] pub trait Index { /// The returned type after indexing. #[stable(feature = "rust1", since = "1.0.0")] @@ -165,7 +167,9 @@ see chapter in The Book : Index { +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +#[const_trait] +pub trait IndexMut: ~const Index { /// Performs the mutable indexing (`container[index]`) operation. 
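The `#[const_trait]` markers being added to `Index`/`IndexMut` above are groundwork for indexing in const contexts (tracking issue 143775, `const_index`). A rough sketch of the direction this enables, assuming the unstable `const_index` gate named in those attributes; `tail` is a hypothetical helper, and further gates (for example `const_trait_impl`) may also be required while this work is in flux:

```rust
#![feature(const_index)]

// Range-based slice indexing goes through `Index<Range<usize>>`, so it
// needs the `const` impls this patch introduces to be usable here.
const fn tail(xs: &[u32]) -> &[u32] {
    &xs[1..]
}
```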
/// /// # Panics diff --git a/library/core/src/option.rs b/library/core/src/option.rs index cbb1c7c7d8f00..d2365749f6129 100644 --- a/library/core/src/option.rs +++ b/library/core/src/option.rs @@ -577,6 +577,7 @@ #![stable(feature = "rust1", since = "1.0.0")] use crate::iter::{self, FusedIterator, TrustedLen}; +use crate::marker::Destruct; use crate::ops::{self, ControlFlow, Deref, DerefMut}; use crate::panicking::{panic, panic_display}; use crate::pin::Pin; @@ -649,7 +650,8 @@ impl Option { #[must_use] #[inline] #[stable(feature = "is_some_and", since = "1.70.0")] - pub fn is_some_and(self, f: impl FnOnce(T) -> bool) -> bool { + #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")] + pub const fn is_some_and(self, f: impl ~const FnOnce(T) -> bool + ~const Destruct) -> bool { match self { None => false, Some(x) => f(x), @@ -697,7 +699,8 @@ impl Option { #[must_use] #[inline] #[stable(feature = "is_none_or", since = "1.82.0")] - pub fn is_none_or(self, f: impl FnOnce(T) -> bool) -> bool { + #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")] + pub const fn is_none_or(self, f: impl ~const FnOnce(T) -> bool + ~const Destruct) -> bool { match self { None => true, Some(x) => f(x), @@ -1023,7 +1026,12 @@ impl Option { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn unwrap_or(self, default: T) -> T { + #[rustc_allow_const_fn_unstable(const_precise_live_drops)] + #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")] + pub const fn unwrap_or(self, default: T) -> T + where + T: ~const Destruct, + { match self { Some(x) => x, None => default, @@ -1042,9 +1050,10 @@ impl Option { #[inline] #[track_caller] #[stable(feature = "rust1", since = "1.0.0")] - pub fn unwrap_or_else(self, f: F) -> T + #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")] + pub const fn unwrap_or_else(self, f: F) -> T where - F: FnOnce() -> T, + F: ~const FnOnce() -> T + ~const Destruct, { match self { Some(x) => x, @@ -1073,9 +1082,10 @@ impl Option { /// [`FromStr`]: crate::str::FromStr #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn unwrap_or_default(self) -> T + #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")] + pub const fn unwrap_or_default(self) -> T where - T: Default, + T: ~const Default, { match self { Some(x) => x, @@ -1139,9 +1149,10 @@ impl Option { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn map(self, f: F) -> Option + #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")] + pub const fn map(self, f: F) -> Option where - F: FnOnce(T) -> U, + F: ~const FnOnce(T) -> U + ~const Destruct, { match self { Some(x) => Some(f(x)), @@ -1169,7 +1180,11 @@ impl Option { /// ``` #[inline] #[stable(feature = "result_option_inspect", since = "1.76.0")] - pub fn inspect(self, f: F) -> Self { + #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")] + pub const fn inspect(self, f: F) -> Self + where + F: ~const FnOnce(&T) + ~const Destruct, + { if let Some(ref x) = self { f(x); } @@ -1198,9 +1213,11 @@ impl Option { #[inline] #[stable(feature = "rust1", since = "1.0.0")] #[must_use = "if you don't need the returned value, use `if let` instead"] - pub fn map_or(self, default: U, f: F) -> U + #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")] + pub const fn map_or(self, default: U, f: F) -> U where - F: FnOnce(T) -> U, + F: ~const FnOnce(T) -> U + ~const Destruct, + U: ~const Destruct, { match self { Some(t) => 
f(t), @@ -1243,10 +1260,11 @@ impl Option { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn map_or_else(self, default: D, f: F) -> U + #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")] + pub const fn map_or_else(self, default: D, f: F) -> U where - D: FnOnce() -> U, - F: FnOnce(T) -> U, + D: ~const FnOnce() -> U + ~const Destruct, + F: ~const FnOnce(T) -> U + ~const Destruct, { match self { Some(t) => f(t), @@ -1273,10 +1291,11 @@ impl Option { /// [default value]: Default::default #[inline] #[unstable(feature = "result_option_map_or_default", issue = "138099")] - pub fn map_or_default(self, f: F) -> U + #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")] + pub const fn map_or_default(self, f: F) -> U where - U: Default, - F: FnOnce(T) -> U, + U: ~const Default, + F: ~const FnOnce(T) -> U + ~const Destruct, { match self { Some(t) => f(t), @@ -1307,7 +1326,8 @@ impl Option { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn ok_or(self, err: E) -> Result { + #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")] + pub const fn ok_or(self, err: E) -> Result { match self { Some(v) => Ok(v), None => Err(err), @@ -1332,9 +1352,10 @@ impl Option { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn ok_or_else(self, err: F) -> Result + #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")] + pub const fn ok_or_else(self, err: F) -> Result where - F: FnOnce() -> E, + F: ~const FnOnce() -> E + ~const Destruct, { match self { Some(v) => Ok(v), @@ -1463,7 +1484,12 @@ impl Option { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn and(self, optb: Option) -> Option { + #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")] + pub const fn and(self, optb: Option) -> Option + where + T: ~const Destruct, + U: ~const Destruct, + { match self { Some(_) => optb, None => None, @@ -1502,9 +1528,10 @@ impl Option { #[inline] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_confusables("flat_map", "flatmap")] - pub fn and_then(self, f: F) -> Option + #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")] + pub const fn and_then(self, f: F) -> Option where - F: FnOnce(T) -> Option, + F: ~const FnOnce(T) -> Option + ~const Destruct, { match self { Some(x) => f(x), @@ -1538,9 +1565,11 @@ impl Option { /// [`Some(t)`]: Some #[inline] #[stable(feature = "option_filter", since = "1.27.0")] - pub fn filter
<P>
(self, predicate: P) -> Self + #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")] + pub const fn filter
<P>
(self, predicate: P) -> Self where - P: FnOnce(&T) -> bool, + P: ~const FnOnce(&T) -> bool + ~const Destruct, + T: ~const Destruct, { if let Some(x) = self { if predicate(&x) { @@ -1579,7 +1608,11 @@ impl Option { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn or(self, optb: Option) -> Option { + #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")] + pub const fn or(self, optb: Option) -> Option + where + T: ~const Destruct, + { match self { x @ Some(_) => x, None => optb, @@ -1601,9 +1634,13 @@ impl Option { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn or_else(self, f: F) -> Option + #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")] + pub const fn or_else(self, f: F) -> Option where - F: FnOnce() -> Option, + F: ~const FnOnce() -> Option + ~const Destruct, + //FIXME(const_hack): this `T: ~const Destruct` is unnecessary, but even precise live drops can't tell + // no value of type `T` gets dropped here + T: ~const Destruct, { match self { x @ Some(_) => x, @@ -1634,7 +1671,11 @@ impl Option { /// ``` #[inline] #[stable(feature = "option_xor", since = "1.37.0")] - pub fn xor(self, optb: Option) -> Option { + #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")] + pub const fn xor(self, optb: Option) -> Option + where + T: ~const Destruct, + { match (self, optb) { (a @ Some(_), None) => a, (None, b @ Some(_)) => b, @@ -1668,7 +1709,11 @@ impl Option { #[must_use = "if you intended to set a value, consider assignment instead"] #[inline] #[stable(feature = "option_insert", since = "1.53.0")] - pub fn insert(&mut self, value: T) -> &mut T { + #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")] + pub const fn insert(&mut self, value: T) -> &mut T + where + T: ~const Destruct, + { *self = Some(value); // SAFETY: the code above just filled the option @@ -1720,9 +1765,10 @@ impl Option { /// ``` #[inline] #[stable(feature = "option_get_or_insert_default", since = "1.83.0")] - pub fn get_or_insert_default(&mut self) -> &mut T + #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")] + pub const fn get_or_insert_default(&mut self) -> &mut T where - T: Default, + T: ~const Default + ~const Destruct, { self.get_or_insert_with(T::default) } @@ -1746,9 +1792,11 @@ impl Option { /// ``` #[inline] #[stable(feature = "option_entry", since = "1.20.0")] - pub fn get_or_insert_with(&mut self, f: F) -> &mut T + #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")] + pub const fn get_or_insert_with(&mut self, f: F) -> &mut T where - F: FnOnce() -> T, + F: ~const FnOnce() -> T + ~const Destruct, + T: ~const Destruct, { if let None = self { *self = Some(f()); @@ -1812,9 +1860,10 @@ impl Option { /// ``` #[inline] #[stable(feature = "option_take_if", since = "1.80.0")] - pub fn take_if
<P>
(&mut self, predicate: P) -> Option<T> + #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")] + pub const fn take_if
<P>
(&mut self, predicate: P) -> Option where - P: FnOnce(&mut T) -> bool, + P: ~const FnOnce(&mut T) -> bool + ~const Destruct, { if self.as_mut().map_or(false, predicate) { self.take() } else { None } } @@ -1859,7 +1908,12 @@ impl Option { /// assert_eq!(x.zip(z), None); /// ``` #[stable(feature = "option_zip_option", since = "1.46.0")] - pub fn zip(self, other: Option) -> Option<(T, U)> { + #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")] + pub const fn zip(self, other: Option) -> Option<(T, U)> + where + T: ~const Destruct, + U: ~const Destruct, + { match (self, other) { (Some(a), Some(b)) => Some((a, b)), _ => None, @@ -1895,9 +1949,12 @@ impl Option { /// assert_eq!(x.zip_with(None, Point::new), None); /// ``` #[unstable(feature = "option_zip", issue = "70086")] - pub fn zip_with(self, other: Option, f: F) -> Option + #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")] + pub const fn zip_with(self, other: Option, f: F) -> Option where - F: FnOnce(T, U) -> R, + F: ~const FnOnce(T, U) -> R + ~const Destruct, + T: ~const Destruct, + U: ~const Destruct, { match (self, other) { (Some(a), Some(b)) => Some(f(a, b)), @@ -2112,7 +2169,7 @@ where impl crate::clone::UseCloned for Option where T: crate::clone::UseCloned {} #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_const_unstable(feature = "const_default", issue = "67792")] +#[rustc_const_unstable(feature = "const_default", issue = "143894")] impl const Default for Option { /// Returns [`None`][Option::None]. /// @@ -2243,7 +2300,8 @@ impl<'a, T> From<&'a mut Option> for Option<&'a mut T> { #[stable(feature = "rust1", since = "1.0.0")] impl crate::marker::StructuralPartialEq for Option {} #[stable(feature = "rust1", since = "1.0.0")] -impl PartialEq for Option { +#[rustc_const_unstable(feature = "const_cmp", issue = "143800")] +impl const PartialEq for Option { #[inline] fn eq(&self, other: &Self) -> bool { // Spelling out the cases explicitly optimizes better than diff --git a/library/core/src/prelude/v1.rs b/library/core/src/prelude/v1.rs index 7b9e04920d51d..a4be66b90cab3 100644 --- a/library/core/src/prelude/v1.rs +++ b/library/core/src/prelude/v1.rs @@ -80,7 +80,7 @@ pub use crate::macros::builtin::{ alloc_error_handler, bench, derive, global_allocator, test, test_case, }; -#[unstable(feature = "derive_const", issue = "none")] +#[unstable(feature = "derive_const", issue = "118304")] pub use crate::macros::builtin::derive_const; #[unstable( diff --git a/library/core/src/primitive_docs.rs b/library/core/src/primitive_docs.rs index 2c77c55745b46..9a1ba7d1728cb 100644 --- a/library/core/src/primitive_docs.rs +++ b/library/core/src/primitive_docs.rs @@ -1081,13 +1081,11 @@ mod prim_str {} /// * [`Debug`] /// * [`Default`] /// * [`Hash`] -/// * [`Random`] /// * [`From<[T; N]>`][from] /// /// [from]: convert::From /// [`Debug`]: fmt::Debug /// [`Hash`]: hash::Hash -/// [`Random`]: random::Random /// /// The following traits are implemented for tuples of any length. 
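Returning briefly to the `const_option_ops` changes above: the point of the `~const FnOnce`/`~const Destruct` bounds is to let the familiar `Option` combinators run at compile time. A hedged sketch of the intended usage, assuming the unstable `const_option_ops` gate named in those attributes; the constant names are made up:

```rust
#![feature(const_option_ops)]

// Evaluated entirely at compile time once the combinators are `const fn`.
const PORT: u16 = None::<u16>.unwrap_or(8080);
const READY: Result<u16, &str> = Some(21u16).ok_or("missing");
```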
These traits have /// implementations that are automatically generated by the compiler, so are not limited by @@ -1365,7 +1363,6 @@ mod prim_f16 {} /// x = a + b + c + d; // As written /// x = (a + c) + (b + d); // Reordered to shorten critical path and enable vectorization /// ``` - #[stable(feature = "rust1", since = "1.0.0")] mod prim_f32 {} diff --git a/library/core/src/ptr/alignment.rs b/library/core/src/ptr/alignment.rs index 9e811666fa462..b658bbb24eddd 100644 --- a/library/core/src/ptr/alignment.rs +++ b/library/core/src/ptr/alignment.rs @@ -255,7 +255,7 @@ impl hash::Hash for Alignment { /// Returns [`Alignment::MIN`], which is valid for any type. #[unstable(feature = "ptr_alignment_type", issue = "102070")] -#[rustc_const_unstable(feature = "const_default", issue = "67792")] +#[rustc_const_unstable(feature = "const_default", issue = "143894")] impl const Default for Alignment { fn default() -> Alignment { Alignment::MIN diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs index 61e88e7a00583..e389943d499fe 100644 --- a/library/core/src/ptr/const_ptr.rs +++ b/library/core/src/ptr/const_ptr.rs @@ -1635,10 +1635,11 @@ impl *const [T] { /// } /// ``` #[unstable(feature = "slice_ptr_get", issue = "74265")] + #[rustc_const_unstable(feature = "const_index", issue = "143775")] #[inline] - pub unsafe fn get_unchecked(self, index: I) -> *const I::Output + pub const unsafe fn get_unchecked(self, index: I) -> *const I::Output where - I: SliceIndex<[T]>, + I: ~const SliceIndex<[T]>, { // SAFETY: the caller ensures that `self` is dereferenceable and `index` in-bounds. unsafe { index.get_unchecked(self) } diff --git a/library/core/src/ptr/docs/add.md b/library/core/src/ptr/docs/add.md index ae7c7785684c9..6e2e87f5f811b 100644 --- a/library/core/src/ptr/docs/add.md +++ b/library/core/src/ptr/docs/add.md @@ -19,7 +19,7 @@ If any of the following conditions are violated, the result is Undefined Behavio bounds of that allocation. In particular, this range must not "wrap around" the edge of the address space. -Allocated objects can never be larger than `isize::MAX` bytes, so if the computed offset +Allocations can never be larger than `isize::MAX` bytes, so if the computed offset stays in bounds of the allocation, it is guaranteed to satisfy the first requirement. This implies, for instance, that `vec.as_ptr().add(vec.len())` (for `vec: Vec`) is always safe. diff --git a/library/core/src/ptr/docs/as_uninit_slice.md b/library/core/src/ptr/docs/as_uninit_slice.md index c80c04058838f..1113f4748c2df 100644 --- a/library/core/src/ptr/docs/as_uninit_slice.md +++ b/library/core/src/ptr/docs/as_uninit_slice.md @@ -10,24 +10,24 @@ When calling this method, you have to ensure that *either* the pointer is null * all of the following is true: * The pointer must be [valid] for reads for `ptr.len() * size_of::()` many bytes, -and it must be properly aligned. This means in particular: + and it must be properly aligned. This means in particular: * The entire memory range of this slice must be contained within a single [allocation]! -Slices can never span across multiple allocations. + Slices can never span across multiple allocations. * The pointer must be aligned even for zero-length slices. One -reason for this is that enum layout optimizations may rely on references -(including slices of any length) being aligned and non-null to distinguish -them from other data. You can obtain a pointer that is usable as `data` -for zero-length slices using [`NonNull::dangling()`]. 
+ reason for this is that enum layout optimizations may rely on references + (including slices of any length) being aligned and non-null to distinguish + them from other data. You can obtain a pointer that is usable as `data` + for zero-length slices using [`NonNull::dangling()`]. * The total size `ptr.len() * size_of::()` of the slice must be no larger than `isize::MAX`. -See the safety documentation of [`pointer::offset`]. + See the safety documentation of [`pointer::offset`]. * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is -arbitrarily chosen and does not necessarily reflect the actual lifetime of the data. -In particular, while this reference exists, the memory the pointer points to must -not get mutated (except inside `UnsafeCell`). + arbitrarily chosen and does not necessarily reflect the actual lifetime of the data. + In particular, while this reference exists, the memory the pointer points to must + not get mutated (except inside `UnsafeCell`). This applies even if the result of this method is unused! diff --git a/library/core/src/ptr/docs/offset.md b/library/core/src/ptr/docs/offset.md index f2e335a79a5c2..f04f5606ab297 100644 --- a/library/core/src/ptr/docs/offset.md +++ b/library/core/src/ptr/docs/offset.md @@ -16,7 +16,7 @@ bounds of that allocation. In particular, this range must not "wrap around" the of the address space. Note that "range" here refers to a half-open range as usual in Rust, i.e., `self..result` for non-negative offsets and `result..self` for negative offsets. -Allocated objects can never be larger than `isize::MAX` bytes, so if the computed offset +Allocations can never be larger than `isize::MAX` bytes, so if the computed offset stays in bounds of the allocation, it is guaranteed to satisfy the first requirement. This implies, for instance, that `vec.as_ptr().add(vec.len())` (for `vec: Vec`) is always safe. diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs index 0a77caa98eda1..6e537d6c15079 100644 --- a/library/core/src/ptr/mod.rs +++ b/library/core/src/ptr/mod.rs @@ -28,7 +28,8 @@ //! undefined behavior to perform two concurrent accesses to the same location from different //! threads unless both accesses only read from memory. Notice that this explicitly //! includes [`read_volatile`] and [`write_volatile`]: Volatile accesses cannot -//! be used for inter-thread synchronization. +//! be used for inter-thread synchronization, regardless of whether they are acting on +//! Rust memory or not. //! * The result of casting a reference to a pointer is valid for as long as the //! underlying allocation is live and no reference (just raw pointers) is used to //! access the same memory. That is, reference and pointer accesses cannot be @@ -114,6 +115,10 @@ //! fully contiguous (i.e., has no "holes"), there is no guarantee that this //! will not change in the future. //! +//! Allocations must behave like "normal" memory: in particular, reads must not have +//! side-effects, and writes must become visible to other threads using the usual synchronization +//! primitives. +//! //! For any allocation with `base` address, `size`, and a set of //! `addresses`, the following are guaranteed: //! - For all addresses `a` in `addresses`, `a` is in the range `base .. (base + @@ -2023,54 +2028,61 @@ pub const unsafe fn write_unaligned(dst: *mut T, src: T) { } } -/// Performs a volatile read of the value from `src` without moving it. This -/// leaves the memory in `src` unchanged. 
-/// -/// Volatile operations are intended to act on I/O memory, and are guaranteed -/// to not be elided or reordered by the compiler across other volatile -/// operations. -/// -/// # Notes -/// -/// Rust does not currently have a rigorously and formally defined memory model, -/// so the precise semantics of what "volatile" means here is subject to change -/// over time. That being said, the semantics will almost always end up pretty -/// similar to [C11's definition of volatile][c11]. -/// -/// The compiler shouldn't change the relative order or number of volatile -/// memory operations. However, volatile memory operations on zero-sized types -/// (e.g., if a zero-sized type is passed to `read_volatile`) are noops -/// and may be ignored. -/// -/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf +/// Performs a volatile read of the value from `src` without moving it. +/// +/// Volatile operations are intended to act on I/O memory. As such, they are considered externally +/// observable events (just like syscalls, but less opaque), and are guaranteed to not be elided or +/// reordered by the compiler across other externally observable events. With this in mind, there +/// are two cases of usage that need to be distinguished: +/// +/// - When a volatile operation is used for memory inside an [allocation], it behaves exactly like +/// [`read`], except for the additional guarantee that it won't be elided or reordered (see +/// above). This implies that the operation will actually access memory and not e.g. be lowered to +/// reusing data from a previous read. Other than that, all the usual rules for memory accesses +/// apply (including provenance). In particular, just like in C, whether an operation is volatile +/// has no bearing whatsoever on questions involving concurrent accesses from multiple threads. +/// Volatile accesses behave exactly like non-atomic accesses in that regard. +/// +/// - Volatile operations, however, may also be used to access memory that is _outside_ of any Rust +/// allocation. In this use-case, the pointer does *not* have to be [valid] for reads. This is +/// typically used for CPU and peripheral registers that must be accessed via an I/O memory +/// mapping, most commonly at fixed addresses reserved by the hardware. These often have special +/// semantics associated to their manipulation, and cannot be used as general purpose memory. +/// Here, any address value is possible, including 0 and [`usize::MAX`], so long as the semantics +/// of such a read are well-defined by the target hardware. The provenance of the pointer is +/// irrelevant, and it can be created with [`without_provenance`]. The access must not trap. It +/// can cause side-effects, but those must not affect Rust-allocated memory in any way. This +/// access is still not considered [atomic], and as such it cannot be used for inter-thread +/// synchronization. +/// +/// Note that volatile memory operations where T is a zero-sized type are noops and may be ignored. +/// +/// [allocation]: crate::ptr#allocated-object +/// [atomic]: crate::sync::atomic#memory-model-for-atomic-accesses /// /// # Safety /// +/// Like [`read`], `read_volatile` creates a bitwise copy of `T`, regardless of whether `T` is +/// [`Copy`]. If `T` is not [`Copy`], using both the returned value and the value at `*src` can +/// [violate memory safety][read-ownership]. However, storing non-[`Copy`] types in volatile memory +/// is almost certainly incorrect. 
+/// /// Behavior is undefined if any of the following conditions are violated: /// -/// * `src` must be [valid] for reads. +/// * `src` must be either [valid] for reads, or it must point to memory outside of all Rust +/// allocations and reading from that memory must: +/// - not trap, and +/// - not cause any memory inside a Rust allocation to be modified. /// /// * `src` must be properly aligned. /// -/// * `src` must point to a properly initialized value of type `T`. -/// -/// Like [`read`], `read_volatile` creates a bitwise copy of `T`, regardless of -/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned -/// value and the value at `*src` can [violate memory safety][read-ownership]. -/// However, storing non-[`Copy`] types in volatile memory is almost certainly -/// incorrect. +/// * Reading from `src` must produce a properly initialized value of type `T`. /// /// Note that even if `T` has size `0`, the pointer must be properly aligned. /// /// [valid]: self#safety /// [read-ownership]: read#ownership-of-the-returned-value /// -/// Just like in C, whether an operation is volatile has no bearing whatsoever -/// on questions involving concurrent access from multiple threads. Volatile -/// accesses behave exactly like non-atomic accesses in that regard. In particular, -/// a race between a `read_volatile` and any write operation to the same location -/// is undefined behavior. -/// /// # Examples /// /// Basic usage: @@ -2093,50 +2105,63 @@ pub unsafe fn read_volatile(src: *const T) -> T { unsafe { ub_checks::assert_unsafe_precondition!( check_language_ub, - "ptr::read_volatile requires that the pointer argument is aligned and non-null", + "ptr::read_volatile requires that the pointer argument is aligned", ( addr: *const () = src as *const (), align: usize = align_of::(), - is_zst: bool = T::IS_ZST, - ) => ub_checks::maybe_is_aligned_and_not_null(addr, align, is_zst) + ) => ub_checks::maybe_is_aligned(addr, align) ); intrinsics::volatile_load(src) } } -/// Performs a volatile write of a memory location with the given value without -/// reading or dropping the old value. -/// -/// Volatile operations are intended to act on I/O memory, and are guaranteed -/// to not be elided or reordered by the compiler across other volatile -/// operations. -/// -/// `write_volatile` does not drop the contents of `dst`. This is safe, but it -/// could leak allocations or resources, so care should be taken not to overwrite -/// an object that should be dropped. -/// -/// Additionally, it does not drop `src`. Semantically, `src` is moved into the -/// location pointed to by `dst`. -/// -/// # Notes -/// -/// Rust does not currently have a rigorously and formally defined memory model, -/// so the precise semantics of what "volatile" means here is subject to change -/// over time. That being said, the semantics will almost always end up pretty -/// similar to [C11's definition of volatile][c11]. -/// -/// The compiler shouldn't change the relative order or number of volatile -/// memory operations. However, volatile memory operations on zero-sized types -/// (e.g., if a zero-sized type is passed to `write_volatile`) are noops -/// and may be ignored. -/// -/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf +/// Performs a volatile write of a memory location with the given value without reading or dropping +/// the old value. +/// +/// Volatile operations are intended to act on I/O memory. 
As such, they are considered externally +/// observable events (just like syscalls), and are guaranteed to not be elided or reordered by the +/// compiler across other externally observable events. With this in mind, there are two cases of +/// usage that need to be distinguished: +/// +/// - When a volatile operation is used for memory inside an [allocation], it behaves exactly like +/// [`write`][write()], except for the additional guarantee that it won't be elided or reordered +/// (see above). This implies that the operation will actually access memory and not e.g. be +/// lowered to a register access. Other than that, all the usual rules for memory accesses apply +/// (including provenance). In particular, just like in C, whether an operation is volatile has no +/// bearing whatsoever on questions involving concurrent access from multiple threads. Volatile +/// accesses behave exactly like non-atomic accesses in that regard. +/// +/// - Volatile operations, however, may also be used to access memory that is _outside_ of any Rust +/// allocation. In this use-case, the pointer does *not* have to be [valid] for writes. This is +/// typically used for CPU and peripheral registers that must be accessed via an I/O memory +/// mapping, most commonly at fixed addresses reserved by the hardware. These often have special +/// semantics associated to their manipulation, and cannot be used as general purpose memory. +/// Here, any address value is possible, including 0 and [`usize::MAX`], so long as the semantics +/// of such a write are well-defined by the target hardware. The provenance of the pointer is +/// irrelevant, and it can be created with [`without_provenance`]. The access must not trap. It +/// can cause side-effects, but those must not affect Rust-allocated memory in any way. This +/// access is still not considered [atomic], and as such it cannot be used for inter-thread +/// synchronization. +/// +/// Note that volatile memory operations on zero-sized types (e.g., if a zero-sized type is passed +/// to `write_volatile`) are noops and may be ignored. +/// +/// `write_volatile` does not drop the contents of `dst`. This is safe, but it could leak +/// allocations or resources, so care should be taken not to overwrite an object that should be +/// dropped when operating on Rust memory. Additionally, it does not drop `src`. Semantically, `src` +/// is moved into the location pointed to by `dst`. +/// +/// [allocation]: crate::ptr#allocated-object +/// [atomic]: crate::sync::atomic#memory-model-for-atomic-accesses /// /// # Safety /// /// Behavior is undefined if any of the following conditions are violated: /// -/// * `dst` must be [valid] for writes. +/// * `dst` must be either [valid] for writes, or it must point to memory outside of all Rust +/// allocations and writing to that memory must: +/// - not trap, and +/// - not cause any memory inside a Rust allocation to be modified. /// /// * `dst` must be properly aligned. /// @@ -2144,12 +2169,6 @@ pub unsafe fn read_volatile(src: *const T) -> T { /// /// [valid]: self#safety /// -/// Just like in C, whether an operation is volatile has no bearing whatsoever -/// on questions involving concurrent access from multiple threads. Volatile -/// accesses behave exactly like non-atomic accesses in that regard. In particular, -/// a race between a `write_volatile` and any other operation (reading or writing) -/// on the same location is undefined behavior. 
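To make the two-case framing in the rewritten volatile documentation concrete, here is a minimal MMIO-style sketch for the "memory outside any Rust allocation" case. The register addresses and the `poll_and_start` helper are hypothetical and only meaningful on a target whose hardware actually maps such registers; on ordinary Rust memory the same calls behave like `read`/`write` plus the no-elision guarantee:

```rust
use core::ptr;

// Hypothetical device register addresses, for illustration only.
const STATUS: usize = 0x4000_0000;
const CONTROL: usize = 0x4000_0004;

/// # Safety
/// Callable only on a target where these addresses are real, aligned MMIO
/// registers whose reads and writes do not trap and do not touch Rust memory.
unsafe fn poll_and_start() {
    // Memory outside all Rust allocations needs no provenance.
    let status = ptr::without_provenance::<u32>(STATUS);
    let control = ptr::without_provenance_mut::<u32>(CONTROL);

    // Each iteration performs a real load; the compiler may not elide it.
    while ptr::read_volatile(status) & 0x1 == 0 {}
    ptr::write_volatile(control, 1);
}
```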
-/// /// # Examples /// /// Basic usage: @@ -2174,12 +2193,11 @@ pub unsafe fn write_volatile(dst: *mut T, src: T) { unsafe { ub_checks::assert_unsafe_precondition!( check_language_ub, - "ptr::write_volatile requires that the pointer argument is aligned and non-null", + "ptr::write_volatile requires that the pointer argument is aligned", ( addr: *mut () = dst as *mut (), align: usize = align_of::(), - is_zst: bool = T::IS_ZST, - ) => ub_checks::maybe_is_aligned_and_not_null(addr, align, is_zst) + ) => ub_checks::maybe_is_aligned(addr, align) ); intrinsics::volatile_store(dst, src); } diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs index c1bd14c8b630c..4d7e0b96b3532 100644 --- a/library/core/src/ptr/mut_ptr.rs +++ b/library/core/src/ptr/mut_ptr.rs @@ -2000,10 +2000,11 @@ impl *mut [T] { /// } /// ``` #[unstable(feature = "slice_ptr_get", issue = "74265")] + #[rustc_const_unstable(feature = "const_index", issue = "143775")] #[inline(always)] - pub unsafe fn get_unchecked_mut(self, index: I) -> *mut I::Output + pub const unsafe fn get_unchecked_mut(self, index: I) -> *mut I::Output where - I: SliceIndex<[T]>, + I: ~const SliceIndex<[T]>, { // SAFETY: the caller ensures that `self` is dereferenceable and `index` in-bounds. unsafe { index.get_unchecked_mut(self) } diff --git a/library/core/src/ptr/non_null.rs b/library/core/src/ptr/non_null.rs index 53ee523693a7b..f932de4c05250 100644 --- a/library/core/src/ptr/non_null.rs +++ b/library/core/src/ptr/non_null.rs @@ -1796,11 +1796,12 @@ impl NonNull<[T]> { /// } /// ``` #[unstable(feature = "slice_ptr_get", issue = "74265")] + #[rustc_const_unstable(feature = "const_index", issue = "143775")] #[inline] #[requires(ub_checks::can_dereference(self.as_ptr()))] // Ensure self can be dereferenced - pub unsafe fn get_unchecked_mut(self, index: I) -> NonNull + pub const unsafe fn get_unchecked_mut(self, index: I) -> NonNull where - I: SliceIndex<[T]>, + I: ~const SliceIndex<[T]>, { // SAFETY: the caller ensures that `self` is dereferenceable and `index` in-bounds. // As a consequence, the resulting pointer cannot be null. diff --git a/library/core/src/random.rs b/library/core/src/random.rs index 051fe26086389..8a51fb289d8f3 100644 --- a/library/core/src/random.rs +++ b/library/core/src/random.rs @@ -1,48 +1,46 @@ //! Random value generation. -//! -//! The [`Random`] trait allows generating a random value for a type using a -//! given [`RandomSource`]. + +use crate::range::RangeFull; /// A source of randomness. #[unstable(feature = "random", issue = "130703")] pub trait RandomSource { /// Fills `bytes` with random bytes. + /// + /// Note that calling `fill_bytes` multiple times is not equivalent to calling `fill_bytes` once + /// with a larger buffer. A `RandomSource` is allowed to return different bytes for those two + /// cases. For instance, this allows a `RandomSource` to generate a word at a time and throw + /// part of it away if not needed. fn fill_bytes(&mut self, bytes: &mut [u8]); } -/// A trait for getting a random value for a type. -/// -/// **Warning:** Be careful when manipulating random values! The -/// [`random`](Random::random) method on integers samples them with a uniform -/// distribution, so a value of 1 is just as likely as [`i32::MAX`]. By using -/// modulo operations, some of the resulting values can become more likely than -/// others. Use audited crates when in doubt. +/// A trait representing a distribution of random values for a type. 
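To see how the reworked `random` module fits together (the `RandomSource` trait above, the `Distribution` trait defined just below, and `RangeFull` as the "uniform over all values" distribution), here is a hedged sketch. `FixedSource` is a deliberately fake source used only for illustration, the `random` feature is unstable, and this assumes `Distribution` ends up re-exported next to `RandomSource`:

```rust
#![feature(random)]

use std::random::{Distribution, RandomSource};

// A deterministic stand-in source; never use something like this for real.
struct FixedSource;

impl RandomSource for FixedSource {
    fn fill_bytes(&mut self, bytes: &mut [u8]) {
        bytes.fill(0xAB);
    }
}

fn main() {
    let mut src = FixedSource;
    // `..` (RangeFull) samples uniformly over all values of the target type.
    let word: u32 = (..).sample(&mut src);
    let flag: bool = (..).sample(&mut src);
    println!("{word:#x} {flag}");
}
```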
#[unstable(feature = "random", issue = "130703")] -pub trait Random: Sized { - /// Generates a random value. - fn random(source: &mut (impl RandomSource + ?Sized)) -> Self; +pub trait Distribution { + /// Samples a random value from the distribution, using the specified random source. + fn sample(&self, source: &mut (impl RandomSource + ?Sized)) -> T; +} + +impl> Distribution for &DT { + fn sample(&self, source: &mut (impl RandomSource + ?Sized)) -> T { + (*self).sample(source) + } } -impl Random for bool { - fn random(source: &mut (impl RandomSource + ?Sized)) -> Self { - u8::random(source) & 1 == 1 +impl Distribution for RangeFull { + fn sample(&self, source: &mut (impl RandomSource + ?Sized)) -> bool { + let byte: u8 = RangeFull.sample(source); + byte & 1 == 1 } } macro_rules! impl_primitive { ($t:ty) => { - impl Random for $t { - /// Generates a random value. - /// - /// **Warning:** Be careful when manipulating the resulting value! This - /// method samples according to a uniform distribution, so a value of 1 is - /// just as likely as [`MAX`](Self::MAX). By using modulo operations, some - /// values can become more likely than others. Use audited crates when in - /// doubt. - fn random(source: &mut (impl RandomSource + ?Sized)) -> Self { - let mut bytes = (0 as Self).to_ne_bytes(); + impl Distribution<$t> for RangeFull { + fn sample(&self, source: &mut (impl RandomSource + ?Sized)) -> $t { + let mut bytes = (0 as $t).to_ne_bytes(); source.fill_bytes(&mut bytes); - Self::from_ne_bytes(bytes) + <$t>::from_ne_bytes(bytes) } } }; diff --git a/library/core/src/range.rs b/library/core/src/range.rs index 2276112a27bb3..5cd7956291ca2 100644 --- a/library/core/src/range.rs +++ b/library/core/src/range.rs @@ -186,14 +186,17 @@ impl IntoBounds for Range { } #[unstable(feature = "new_range_api", issue = "125687")] -impl From> for legacy::Range { +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +impl const From> for legacy::Range { #[inline] fn from(value: Range) -> Self { Self { start: value.start, end: value.end } } } + #[unstable(feature = "new_range_api", issue = "125687")] -impl From> for Range { +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +impl const From> for Range { #[inline] fn from(value: legacy::Range) -> Self { Self { start: value.start, end: value.end } @@ -362,7 +365,8 @@ impl IntoBounds for RangeInclusive { } #[unstable(feature = "new_range_api", issue = "125687")] -impl From> for legacy::RangeInclusive { +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +impl const From> for legacy::RangeInclusive { #[inline] fn from(value: RangeInclusive) -> Self { Self::new(value.start, value.end) @@ -506,14 +510,16 @@ impl IntoBounds for RangeFrom { } #[unstable(feature = "new_range_api", issue = "125687")] -impl From> for legacy::RangeFrom { +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +impl const From> for legacy::RangeFrom { #[inline] fn from(value: RangeFrom) -> Self { Self { start: value.start } } } #[unstable(feature = "new_range_api", issue = "125687")] -impl From> for RangeFrom { +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +impl const From> for RangeFrom { #[inline] fn from(value: legacy::RangeFrom) -> Self { Self { start: value.start } diff --git a/library/core/src/slice/cmp.rs b/library/core/src/slice/cmp.rs index 5ce72b46eee36..1eda8bc1bec40 100644 --- a/library/core/src/slice/cmp.rs +++ b/library/core/src/slice/cmp.rs @@ -8,9 +8,10 @@ use crate::num::NonZero; use 
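To make the new shape of the `random` API concrete, here is a small, hedged sketch of a caller. `XorShift64` is a toy deterministic source invented purely for illustration and is not part of this patch; the example assumes a nightly compiler with `#![feature(random)]` and relies on the `u8` and `bool` impls on `RangeFull` shown in the hunk above.

```rust
#![feature(random)] // nightly-only; the `random` module is unstable (issue #130703)

use core::random::{Distribution, RandomSource};

/// Toy deterministic source for illustration only; not suitable for real use.
struct XorShift64(u64);

impl RandomSource for XorShift64 {
    fn fill_bytes(&mut self, bytes: &mut [u8]) {
        // One 64-bit word at a time; unused tail bytes of the last word are
        // discarded, which is exactly the leeway `fill_bytes` documents.
        for chunk in bytes.chunks_mut(8) {
            self.0 ^= self.0 << 13;
            self.0 ^= self.0 >> 7;
            self.0 ^= self.0 << 17;
            chunk.copy_from_slice(&self.0.to_ne_bytes()[..chunk.len()]);
        }
    }
}

fn main() {
    let mut src = XorShift64(0x9E37_79B9_7F4A_7C15);
    // `..` (RangeFull) is the "uniform over the whole type" distribution.
    let byte: u8 = (..).sample(&mut src);
    let flag: bool = (..).sample(&mut src);
    println!("byte = {byte}, flag = {flag}");
}
```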
crate::ops::ControlFlow; #[stable(feature = "rust1", since = "1.0.0")] -impl PartialEq<[U]> for [T] +#[rustc_const_unstable(feature = "const_cmp", issue = "143800")] +impl const PartialEq<[U]> for [T] where - T: PartialEq, + T: ~const PartialEq, { fn eq(&self, other: &[U]) -> bool { SlicePartialEq::equal(self, other) @@ -94,6 +95,8 @@ impl PartialOrd for [T] { #[doc(hidden)] // intermediate trait for specialization of slice's PartialEq +#[const_trait] +#[rustc_const_unstable(feature = "const_cmp", issue = "143800")] trait SlicePartialEq { fn equal(&self, other: &[B]) -> bool; @@ -103,9 +106,10 @@ trait SlicePartialEq { } // Generic slice equality -impl SlicePartialEq for [A] +#[rustc_const_unstable(feature = "const_cmp", issue = "143800")] +impl const SlicePartialEq for [A] where - A: PartialEq, + A: ~const PartialEq, { default fn equal(&self, other: &[B]) -> bool { if self.len() != other.len() { @@ -115,11 +119,14 @@ where // Implemented as explicit indexing rather // than zipped iterators for performance reasons. // See PR https://github.com/rust-lang/rust/pull/116846 - for idx in 0..self.len() { + // FIXME(const_hack): make this a `for idx in 0..self.len()` loop. + let mut idx = 0; + while idx < self.len() { // bound checks are optimized away if self[idx] != other[idx] { return false; } + idx += 1; } true @@ -128,9 +135,10 @@ where // When each element can be compared byte-wise, we can compare all the bytes // from the whole size in one call to the intrinsics. -impl SlicePartialEq for [A] +#[rustc_const_unstable(feature = "const_cmp", issue = "143800")] +impl const SlicePartialEq for [A] where - A: BytewiseEq, + A: ~const BytewiseEq, { fn equal(&self, other: &[B]) -> bool { if self.len() != other.len() { diff --git a/library/core/src/slice/index.rs b/library/core/src/slice/index.rs index f725c3fdd94cc..322b3580eded2 100644 --- a/library/core/src/slice/index.rs +++ b/library/core/src/slice/index.rs @@ -6,9 +6,10 @@ use crate::ub_checks::assert_unsafe_precondition; use crate::{ops, range}; #[stable(feature = "rust1", since = "1.0.0")] -impl ops::Index for [T] +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +impl const ops::Index for [T] where - I: SliceIndex<[T]>, + I: ~const SliceIndex<[T]>, { type Output = I::Output; @@ -19,9 +20,10 @@ where } #[stable(feature = "rust1", since = "1.0.0")] -impl ops::IndexMut for [T] +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +impl const ops::IndexMut for [T] where - I: SliceIndex<[T]>, + I: ~const SliceIndex<[T]>, { #[inline(always)] fn index_mut(&mut self, index: I) -> &mut I::Output { @@ -158,6 +160,8 @@ mod private_slice_index { message = "the type `{T}` cannot be indexed by `{Self}`", label = "slice indices are of type `usize` or ranges of `usize`" )] +#[const_trait] +#[rustc_const_unstable(feature = "const_index", issue = "143775")] pub unsafe trait SliceIndex: private_slice_index::Sealed { /// The output type returned by methods. #[stable(feature = "slice_get_slice", since = "1.28.0")] @@ -208,7 +212,8 @@ pub unsafe trait SliceIndex: private_slice_index::Sealed { /// The methods `index` and `index_mut` panic if the index is out of bounds. 
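The `FIXME(const_hack)` loop above is the standard workaround for `for` loops not yet being usable in `const fn`. A minimal, stable-Rust sketch of the same pattern outside the standard library:

```rust
/// Compare two byte slices element by element in a `const fn`, using the
/// manual `while` loop that stands in for `for idx in 0..a.len()`.
const fn bytes_equal(a: &[u8], b: &[u8]) -> bool {
    if a.len() != b.len() {
        return false;
    }
    let mut idx = 0;
    while idx < a.len() {
        if a[idx] != b[idx] {
            return false;
        }
        idx += 1;
    }
    true
}

// Evaluated entirely at compile time.
const _: () = assert!(bytes_equal(b"abc", b"abc"));
const _: () = assert!(!bytes_equal(b"abc", b"abd"));

fn main() {}
```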
#[stable(feature = "slice_get_slice_impls", since = "1.15.0")] -unsafe impl SliceIndex<[T]> for usize { +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +unsafe impl const SliceIndex<[T]> for usize { type Output = T; #[inline] @@ -278,7 +283,8 @@ unsafe impl SliceIndex<[T]> for usize { /// Because `IndexRange` guarantees `start <= end`, fewer checks are needed here /// than there are for a general `Range` (which might be `100..3`). -unsafe impl SliceIndex<[T]> for ops::IndexRange { +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +unsafe impl const SliceIndex<[T]> for ops::IndexRange { type Output = [T]; #[inline] @@ -354,7 +360,8 @@ unsafe impl SliceIndex<[T]> for ops::IndexRange { /// - the start of the range is greater than the end of the range or /// - the end of the range is out of bounds. #[stable(feature = "slice_get_slice_impls", since = "1.15.0")] -unsafe impl SliceIndex<[T]> for ops::Range { +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +unsafe impl const SliceIndex<[T]> for ops::Range { type Output = [T]; #[inline] @@ -453,7 +460,8 @@ unsafe impl SliceIndex<[T]> for ops::Range { } #[unstable(feature = "new_range_api", issue = "125687")] -unsafe impl SliceIndex<[T]> for range::Range { +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +unsafe impl const SliceIndex<[T]> for range::Range { type Output = [T]; #[inline] @@ -491,7 +499,8 @@ unsafe impl SliceIndex<[T]> for range::Range { /// The methods `index` and `index_mut` panic if the end of the range is out of bounds. #[stable(feature = "slice_get_slice_impls", since = "1.15.0")] -unsafe impl SliceIndex<[T]> for ops::RangeTo { +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +unsafe impl const SliceIndex<[T]> for ops::RangeTo { type Output = [T]; #[inline] @@ -529,7 +538,8 @@ unsafe impl SliceIndex<[T]> for ops::RangeTo { /// The methods `index` and `index_mut` panic if the start of the range is out of bounds. #[stable(feature = "slice_get_slice_impls", since = "1.15.0")] -unsafe impl SliceIndex<[T]> for ops::RangeFrom { +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +unsafe impl const SliceIndex<[T]> for ops::RangeFrom { type Output = [T]; #[inline] @@ -574,7 +584,8 @@ unsafe impl SliceIndex<[T]> for ops::RangeFrom { } #[unstable(feature = "new_range_api", issue = "125687")] -unsafe impl SliceIndex<[T]> for range::RangeFrom { +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +unsafe impl const SliceIndex<[T]> for range::RangeFrom { type Output = [T]; #[inline] @@ -611,7 +622,8 @@ unsafe impl SliceIndex<[T]> for range::RangeFrom { } #[stable(feature = "slice_get_slice_impls", since = "1.15.0")] -unsafe impl SliceIndex<[T]> for ops::RangeFull { +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +unsafe impl const SliceIndex<[T]> for ops::RangeFull { type Output = [T]; #[inline] @@ -650,7 +662,8 @@ unsafe impl SliceIndex<[T]> for ops::RangeFull { /// - the start of the range is greater than the end of the range or /// - the end of the range is out of bounds. 
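With `SliceIndex` and the slice `Index`/`IndexMut` impls made `const` above, range indexing of slices becomes usable in `const fn`. A hedged sketch under assumed nightly gates; the exact set of feature gates is an assumption here, and some nightlies may additionally require `const_trait_impl`:

```rust
#![feature(const_index)] // unstable, tracking issue #143775

/// Split a fixed-size header off the front of a byte slice at compile time.
/// Out-of-range splits become compile-time errors via const panic.
const fn split_header(bytes: &[u8]) -> (&[u8], &[u8]) {
    (&bytes[..4], &bytes[4..])
}

const DATA: &[u8] = b"HEADbody";
const HEADER: &[u8] = split_header(DATA).0;
const BODY: &[u8] = split_header(DATA).1;

fn main() {
    assert_eq!(HEADER, b"HEAD");
    assert_eq!(BODY, b"body");
}
```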
#[stable(feature = "inclusive_range", since = "1.26.0")] -unsafe impl SliceIndex<[T]> for ops::RangeInclusive { +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +unsafe impl const SliceIndex<[T]> for ops::RangeInclusive { type Output = [T]; #[inline] @@ -693,7 +706,8 @@ unsafe impl SliceIndex<[T]> for ops::RangeInclusive { } #[unstable(feature = "new_range_api", issue = "125687")] -unsafe impl SliceIndex<[T]> for range::RangeInclusive { +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +unsafe impl const SliceIndex<[T]> for range::RangeInclusive { type Output = [T]; #[inline] @@ -731,7 +745,8 @@ unsafe impl SliceIndex<[T]> for range::RangeInclusive { /// The methods `index` and `index_mut` panic if the end of the range is out of bounds. #[stable(feature = "inclusive_range", since = "1.26.0")] -unsafe impl SliceIndex<[T]> for ops::RangeToInclusive { +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +unsafe impl const SliceIndex<[T]> for ops::RangeToInclusive { type Output = [T]; #[inline] diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs index 40602fa4c5a50..1a38ad5cd61fa 100644 --- a/library/core/src/slice/mod.rs +++ b/library/core/src/slice/mod.rs @@ -572,9 +572,10 @@ impl [T] { #[rustc_no_implicit_autorefs] #[inline] #[must_use] - pub fn get(&self, index: I) -> Option<&I::Output> + #[rustc_const_unstable(feature = "const_index", issue = "143775")] + pub const fn get(&self, index: I) -> Option<&I::Output> where - I: SliceIndex, + I: ~const SliceIndex, { index.get(self) } @@ -598,9 +599,10 @@ impl [T] { #[rustc_no_implicit_autorefs] #[inline] #[must_use] - pub fn get_mut(&mut self, index: I) -> Option<&mut I::Output> + #[rustc_const_unstable(feature = "const_index", issue = "143775")] + pub const fn get_mut(&mut self, index: I) -> Option<&mut I::Output> where - I: SliceIndex, + I: ~const SliceIndex, { index.get_mut(self) } @@ -637,9 +639,10 @@ impl [T] { #[inline] #[must_use] #[track_caller] - pub unsafe fn get_unchecked(&self, index: I) -> &I::Output + #[rustc_const_unstable(feature = "const_index", issue = "143775")] + pub const unsafe fn get_unchecked(&self, index: I) -> &I::Output where - I: SliceIndex, + I: ~const SliceIndex, { // SAFETY: the caller must uphold most of the safety requirements for `get_unchecked`; // the slice is dereferenceable because `self` is a safe reference. @@ -681,9 +684,10 @@ impl [T] { #[inline] #[must_use] #[track_caller] - pub unsafe fn get_unchecked_mut(&mut self, index: I) -> &mut I::Output + #[rustc_const_unstable(feature = "const_index", issue = "143775")] + pub const unsafe fn get_unchecked_mut(&mut self, index: I) -> &mut I::Output where - I: SliceIndex, + I: ~const SliceIndex, { // SAFETY: the caller must uphold the safety requirements for `get_unchecked_mut`; // the slice is dereferenceable because `self` is a safe reference. @@ -971,7 +975,7 @@ impl [T] { /// assert!(v == [3, 2, 1]); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_slice_reverse", issue = "135120")] + #[rustc_const_stable(feature = "const_slice_reverse", since = "CURRENT_RUSTC_VERSION")] #[inline] pub const fn reverse(&mut self) { let half_len = self.len() / 2; @@ -1004,6 +1008,7 @@ impl [T] { // this check tells LLVM that the indexing below is // in-bounds. Then after inlining -- once the actual // lengths of the slices are known -- it's removed. 
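`<[T]>::reverse` is marked `rustc_const_stable` in the hunk above; until a release with that change ships, the nightly gate below is still required. A small sketch of what it enables, with a helper of my own rather than anything from the patch:

```rust
#![feature(const_slice_reverse)] // gate needed until the stabilization above lands

/// Reverse a fixed-size byte array at compile time by delegating to the
/// now-const `<[u8]>::reverse`.
const fn reversed<const N: usize>(mut arr: [u8; N]) -> [u8; N] {
    arr.reverse();
    arr
}

const REV: [u8; 4] = reversed(*b"abcd");

fn main() {
    assert_eq!(&REV, b"dcba");
}
```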
+ // FIXME(const_trait_impl) replace with let (a, b) = (&mut a[..n], &mut b[..n]); let (a, _) = a.split_at_mut(n); let (b, _) = b.split_at_mut(n); @@ -5262,7 +5267,7 @@ where } #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_const_unstable(feature = "const_default", issue = "67792")] +#[rustc_const_unstable(feature = "const_default", issue = "143894")] impl const Default for &[T] { /// Creates an empty slice. fn default() -> Self { @@ -5271,7 +5276,7 @@ impl const Default for &[T] { } #[stable(feature = "mut_slice_default", since = "1.5.0")] -#[rustc_const_unstable(feature = "const_default", issue = "67792")] +#[rustc_const_unstable(feature = "const_default", issue = "143894")] impl const Default for &mut [T] { /// Creates a mutable empty slice. fn default() -> Self { diff --git a/library/core/src/str/mod.rs b/library/core/src/str/mod.rs index 32a2298817538..029abf1753913 100644 --- a/library/core/src/str/mod.rs +++ b/library/core/src/str/mod.rs @@ -589,8 +589,9 @@ impl str { /// assert!(v.get(..42).is_none()); /// ``` #[stable(feature = "str_checked_slicing", since = "1.20.0")] + #[rustc_const_unstable(feature = "const_index", issue = "143775")] #[inline] - pub fn get>(&self, i: I) -> Option<&I::Output> { + pub const fn get>(&self, i: I) -> Option<&I::Output> { i.get(self) } @@ -621,8 +622,9 @@ impl str { /// assert_eq!("HEllo", v); /// ``` #[stable(feature = "str_checked_slicing", since = "1.20.0")] + #[rustc_const_unstable(feature = "const_index", issue = "143775")] #[inline] - pub fn get_mut>(&mut self, i: I) -> Option<&mut I::Output> { + pub const fn get_mut>(&mut self, i: I) -> Option<&mut I::Output> { i.get_mut(self) } @@ -952,6 +954,7 @@ impl str { /// /// The caller must ensure that `mid` is a valid byte offset from the start /// of the string and falls on the boundary of a UTF-8 code point. + #[inline] const unsafe fn split_at_unchecked(&self, mid: usize) -> (&str, &str) { let len = self.len(); let ptr = self.as_ptr(); @@ -2648,7 +2651,6 @@ impl str { /// you're trying to parse into. /// /// `parse` can parse into any type that implements the [`FromStr`] trait. 
- /// /// # Errors /// @@ -3072,7 +3074,7 @@ impl AsRef<[u8]> for str { } #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_const_unstable(feature = "const_default", issue = "67792")] +#[rustc_const_unstable(feature = "const_default", issue = "143894")] impl const Default for &str { /// Creates an empty str #[inline] @@ -3082,7 +3084,7 @@ impl const Default for &str { } #[stable(feature = "default_mut_str", since = "1.28.0")] -#[rustc_const_unstable(feature = "const_default", issue = "67792")] +#[rustc_const_unstable(feature = "const_default", issue = "143894")] impl const Default for &mut str { /// Creates an empty mutable str #[inline] diff --git a/library/core/src/str/traits.rs b/library/core/src/str/traits.rs index b9559c8317133..d0f2b9226bf6d 100644 --- a/library/core/src/str/traits.rs +++ b/library/core/src/str/traits.rs @@ -23,7 +23,8 @@ impl Ord for str { } #[stable(feature = "rust1", since = "1.0.0")] -impl PartialEq for str { +#[rustc_const_unstable(feature = "const_cmp", issue = "143800")] +impl const PartialEq for str { #[inline] fn eq(&self, other: &str) -> bool { self.as_bytes() == other.as_bytes() @@ -49,9 +50,10 @@ impl PartialOrd for str { } #[stable(feature = "rust1", since = "1.0.0")] -impl ops::Index for str +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +impl const ops::Index for str where - I: SliceIndex, + I: ~const SliceIndex, { type Output = I::Output; @@ -62,9 +64,10 @@ where } #[stable(feature = "rust1", since = "1.0.0")] -impl ops::IndexMut for str +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +impl const ops::IndexMut for str where - I: SliceIndex, + I: ~const SliceIndex, { #[inline] fn index_mut(&mut self, index: I) -> &mut I::Output { @@ -92,7 +95,8 @@ const fn str_index_overflow_fail() -> ! { /// /// Equivalent to `&self[0 .. len]` or `&mut self[0 .. len]`. #[stable(feature = "str_checked_slicing", since = "1.20.0")] -unsafe impl SliceIndex for ops::RangeFull { +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +unsafe impl const SliceIndex for ops::RangeFull { type Output = str; #[inline] fn get(self, slice: &str) -> Option<&Self::Output> { @@ -156,7 +160,8 @@ unsafe impl SliceIndex for ops::RangeFull { /// // &s[3 .. 100]; /// ``` #[stable(feature = "str_checked_slicing", since = "1.20.0")] -unsafe impl SliceIndex for ops::Range { +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +unsafe impl const SliceIndex for ops::Range { type Output = str; #[inline] fn get(self, slice: &str) -> Option<&Self::Output> { @@ -260,7 +265,8 @@ unsafe impl SliceIndex for ops::Range { } #[unstable(feature = "new_range_api", issue = "125687")] -unsafe impl SliceIndex for range::Range { +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +unsafe impl const SliceIndex for range::Range { type Output = str; #[inline] fn get(self, slice: &str) -> Option<&Self::Output> { @@ -431,7 +437,8 @@ unsafe impl SliceIndex for (ops::Bound, ops::Bound) { /// Panics if `end` does not point to the starting byte offset of a /// character (as defined by `is_char_boundary`), or if `end > len`. 
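The `str` side mirrors the slice changes: checked `get`/`get_mut` and the range `SliceIndex<str>` impls become `const`. A short hedged sketch, again assuming a nightly compiler and the same feature gate as above:

```rust
#![feature(const_index)] // unstable, tracking issue #143775

const GREETING: &str = "Hello, world!";

// Checked slicing in a const item: `get` returns `None` rather than
// panicking if the range is out of bounds or splits a UTF-8 character.
const HELLO: &str = match GREETING.get(..5) {
    Some(s) => s,
    None => "",
};

fn main() {
    assert_eq!(HELLO, "Hello");
}
```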
#[stable(feature = "str_checked_slicing", since = "1.20.0")] -unsafe impl SliceIndex for ops::RangeTo { +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +unsafe impl const SliceIndex for ops::RangeTo { type Output = str; #[inline] fn get(self, slice: &str) -> Option<&Self::Output> { @@ -499,7 +506,8 @@ unsafe impl SliceIndex for ops::RangeTo { /// Panics if `begin` does not point to the starting byte offset of /// a character (as defined by `is_char_boundary`), or if `begin > len`. #[stable(feature = "str_checked_slicing", since = "1.20.0")] -unsafe impl SliceIndex for ops::RangeFrom { +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +unsafe impl const SliceIndex for ops::RangeFrom { type Output = str; #[inline] fn get(self, slice: &str) -> Option<&Self::Output> { @@ -554,7 +562,8 @@ unsafe impl SliceIndex for ops::RangeFrom { } #[unstable(feature = "new_range_api", issue = "125687")] -unsafe impl SliceIndex for range::RangeFrom { +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +unsafe impl const SliceIndex for range::RangeFrom { type Output = str; #[inline] fn get(self, slice: &str) -> Option<&Self::Output> { @@ -625,7 +634,8 @@ unsafe impl SliceIndex for range::RangeFrom { /// to the ending byte offset of a character (`end + 1` is either a starting /// byte offset or equal to `len`), if `begin > end`, or if `end >= len`. #[stable(feature = "inclusive_range", since = "1.26.0")] -unsafe impl SliceIndex for ops::RangeInclusive { +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +unsafe impl const SliceIndex for ops::RangeInclusive { type Output = str; #[inline] fn get(self, slice: &str) -> Option<&Self::Output> { @@ -662,7 +672,8 @@ unsafe impl SliceIndex for ops::RangeInclusive { } #[unstable(feature = "new_range_api", issue = "125687")] -unsafe impl SliceIndex for range::RangeInclusive { +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +unsafe impl const SliceIndex for range::RangeInclusive { type Output = str; #[inline] fn get(self, slice: &str) -> Option<&Self::Output> { @@ -713,7 +724,8 @@ unsafe impl SliceIndex for range::RangeInclusive { /// (`end + 1` is either a starting byte offset as defined by /// `is_char_boundary`, or equal to `len`), or if `end >= len`. #[stable(feature = "inclusive_range", since = "1.26.0")] -unsafe impl SliceIndex for ops::RangeToInclusive { +#[rustc_const_unstable(feature = "const_index", issue = "143775")] +unsafe impl const SliceIndex for ops::RangeToInclusive { type Output = str; #[inline] fn get(self, slice: &str) -> Option<&Self::Output> { diff --git a/library/core/src/tuple.rs b/library/core/src/tuple.rs index 9cf08e74ff692..23a0a6877df77 100644 --- a/library/core/src/tuple.rs +++ b/library/core/src/tuple.rs @@ -3,7 +3,6 @@ use crate::cmp::Ordering::{self, *}; use crate::marker::{ConstParamTy_, StructuralPartialEq, UnsizedConstParamTy}; use crate::ops::ControlFlow::{self, Break, Continue}; -use crate::random::{Random, RandomSource}; // Recursive macro for implementing n-ary tuple functions and operations // @@ -131,16 +130,6 @@ macro_rules! tuple_impls { } } - maybe_tuple_doc! { - $($T)+ @ - #[unstable(feature = "random", issue = "130703")] - impl<$($T: Random),+> Random for ($($T,)+) { - fn random(source: &mut (impl RandomSource + ?Sized)) -> Self { - ($({ let x: $T = Random::random(source); x},)+) - } - } - } - maybe_tuple_doc! 
{ $($T)+ @ #[stable(feature = "array_tuple_conv", since = "1.71.0")] diff --git a/library/core/src/ub_checks.rs b/library/core/src/ub_checks.rs index 6c217fa2ffaad..8b937ed48b848 100644 --- a/library/core/src/ub_checks.rs +++ b/library/core/src/ub_checks.rs @@ -120,13 +120,25 @@ pub(crate) const fn maybe_is_aligned_and_not_null( align: usize, is_zst: bool, ) -> bool { + // This is just for safety checks so we can const_eval_select. + maybe_is_aligned(ptr, align) && (is_zst || !ptr.is_null()) +} + +/// Checks whether `ptr` is properly aligned with respect to the given alignment. +/// +/// In `const` this is approximate and can fail spuriously. It is primarily intended +/// for `assert_unsafe_precondition!` with `check_language_ub`, in which case the +/// check is anyway not executed in `const`. +#[inline] +#[rustc_allow_const_fn_unstable(const_eval_select)] +pub(crate) const fn maybe_is_aligned(ptr: *const (), align: usize) -> bool { // This is just for safety checks so we can const_eval_select. const_eval_select!( - @capture { ptr: *const (), align: usize, is_zst: bool } -> bool: + @capture { ptr: *const (), align: usize } -> bool: if const { - is_zst || !ptr.is_null() + true } else { - ptr.is_aligned_to(align) && (is_zst || !ptr.is_null()) + ptr.is_aligned_to(align) } ) } diff --git a/library/coretests/tests/floats/f128.rs b/library/coretests/tests/floats/f128.rs index 38df09a91c103..36d6a20a94427 100644 --- a/library/coretests/tests/floats/f128.rs +++ b/library/coretests/tests/floats/f128.rs @@ -1,9 +1,7 @@ // FIXME(f16_f128): only tested on platforms that have symbols and aren't buggy #![cfg(target_has_reliable_f128)] -use core::ops::{Add, Div, Mul, Sub}; use std::f128::consts; -use std::num::FpCategory as Fp; use super::{assert_approx_eq, assert_biteq}; @@ -38,160 +36,9 @@ const NAN_MASK1: u128 = 0x0000aaaaaaaaaaaaaaaaaaaaaaaaaaaa; /// Second pattern over the mantissa const NAN_MASK2: u128 = 0x00005555555555555555555555555555; -#[test] -fn test_num_f128() { - // FIXME(f16_f128): replace with a `test_num` call once the required `fmodl`/`fmodf128` - // function is available on all platforms. - let ten = 10f128; - let two = 2f128; - assert_biteq!(ten.add(two), ten + two); - assert_biteq!(ten.sub(two), ten - two); - assert_biteq!(ten.mul(two), ten * two); - assert_biteq!(ten.div(two), ten / two); - #[cfg(any(miri, target_has_reliable_f128_math))] - assert_biteq!(core::ops::Rem::rem(ten, two), ten % two); -} - // FIXME(f16_f128,miri): many of these have to be disabled since miri does not yet support // the intrinsics. 
-#[test] -fn test_infinity() { - let inf: f128 = f128::INFINITY; - assert!(inf.is_infinite()); - assert!(!inf.is_finite()); - assert!(inf.is_sign_positive()); - assert!(!inf.is_sign_negative()); - assert!(!inf.is_nan()); - assert!(!inf.is_normal()); - assert_eq!(Fp::Infinite, inf.classify()); -} - -#[test] -fn test_neg_infinity() { - let neg_inf: f128 = f128::NEG_INFINITY; - assert!(neg_inf.is_infinite()); - assert!(!neg_inf.is_finite()); - assert!(!neg_inf.is_sign_positive()); - assert!(neg_inf.is_sign_negative()); - assert!(!neg_inf.is_nan()); - assert!(!neg_inf.is_normal()); - assert_eq!(Fp::Infinite, neg_inf.classify()); -} - -#[test] -fn test_zero() { - let zero: f128 = 0.0f128; - assert_biteq!(0.0, zero); - assert!(!zero.is_infinite()); - assert!(zero.is_finite()); - assert!(zero.is_sign_positive()); - assert!(!zero.is_sign_negative()); - assert!(!zero.is_nan()); - assert!(!zero.is_normal()); - assert_eq!(Fp::Zero, zero.classify()); -} - -#[test] -fn test_neg_zero() { - let neg_zero: f128 = -0.0; - assert_eq!(0.0, neg_zero); - assert_biteq!(-0.0, neg_zero); - assert!(!neg_zero.is_infinite()); - assert!(neg_zero.is_finite()); - assert!(!neg_zero.is_sign_positive()); - assert!(neg_zero.is_sign_negative()); - assert!(!neg_zero.is_nan()); - assert!(!neg_zero.is_normal()); - assert_eq!(Fp::Zero, neg_zero.classify()); -} - -#[test] -fn test_one() { - let one: f128 = 1.0f128; - assert_biteq!(1.0, one); - assert!(!one.is_infinite()); - assert!(one.is_finite()); - assert!(one.is_sign_positive()); - assert!(!one.is_sign_negative()); - assert!(!one.is_nan()); - assert!(one.is_normal()); - assert_eq!(Fp::Normal, one.classify()); -} - -#[test] -fn test_is_nan() { - let nan: f128 = f128::NAN; - let inf: f128 = f128::INFINITY; - let neg_inf: f128 = f128::NEG_INFINITY; - assert!(nan.is_nan()); - assert!(!0.0f128.is_nan()); - assert!(!5.3f128.is_nan()); - assert!(!(-10.732f128).is_nan()); - assert!(!inf.is_nan()); - assert!(!neg_inf.is_nan()); -} - -#[test] -fn test_is_infinite() { - let nan: f128 = f128::NAN; - let inf: f128 = f128::INFINITY; - let neg_inf: f128 = f128::NEG_INFINITY; - assert!(!nan.is_infinite()); - assert!(inf.is_infinite()); - assert!(neg_inf.is_infinite()); - assert!(!0.0f128.is_infinite()); - assert!(!42.8f128.is_infinite()); - assert!(!(-109.2f128).is_infinite()); -} - -#[test] -fn test_is_finite() { - let nan: f128 = f128::NAN; - let inf: f128 = f128::INFINITY; - let neg_inf: f128 = f128::NEG_INFINITY; - assert!(!nan.is_finite()); - assert!(!inf.is_finite()); - assert!(!neg_inf.is_finite()); - assert!(0.0f128.is_finite()); - assert!(42.8f128.is_finite()); - assert!((-109.2f128).is_finite()); -} - -#[test] -fn test_is_normal() { - let nan: f128 = f128::NAN; - let inf: f128 = f128::INFINITY; - let neg_inf: f128 = f128::NEG_INFINITY; - let zero: f128 = 0.0f128; - let neg_zero: f128 = -0.0; - assert!(!nan.is_normal()); - assert!(!inf.is_normal()); - assert!(!neg_inf.is_normal()); - assert!(!zero.is_normal()); - assert!(!neg_zero.is_normal()); - assert!(1f128.is_normal()); - assert!(1e-4931f128.is_normal()); - assert!(!1e-4932f128.is_normal()); -} - -#[test] -fn test_classify() { - let nan: f128 = f128::NAN; - let inf: f128 = f128::INFINITY; - let neg_inf: f128 = f128::NEG_INFINITY; - let zero: f128 = 0.0f128; - let neg_zero: f128 = -0.0; - assert_eq!(nan.classify(), Fp::Nan); - assert_eq!(inf.classify(), Fp::Infinite); - assert_eq!(neg_inf.classify(), Fp::Infinite); - assert_eq!(zero.classify(), Fp::Zero); - assert_eq!(neg_zero.classify(), Fp::Zero); - assert_eq!(1f128.classify(), 
Fp::Normal); - assert_eq!(1e-4931f128.classify(), Fp::Normal); - assert_eq!(1e-4932f128.classify(), Fp::Subnormal); -} - #[test] #[cfg(any(miri, target_has_reliable_f128_math))] fn test_abs() { diff --git a/library/coretests/tests/floats/f16.rs b/library/coretests/tests/floats/f16.rs index f6749d796cca9..351c008a37bab 100644 --- a/library/coretests/tests/floats/f16.rs +++ b/library/coretests/tests/floats/f16.rs @@ -2,7 +2,6 @@ #![cfg(target_has_reliable_f16)] use std::f16::consts; -use std::num::FpCategory as Fp; use super::{assert_approx_eq, assert_biteq}; @@ -43,151 +42,9 @@ const NAN_MASK1: u16 = 0x02aa; /// Second pattern over the mantissa const NAN_MASK2: u16 = 0x0155; -#[test] -fn test_num_f16() { - super::test_num(10f16, 2f16); -} - // FIXME(f16_f128,miri): many of these have to be disabled since miri does not yet support // the intrinsics. -#[test] -fn test_infinity() { - let inf: f16 = f16::INFINITY; - assert!(inf.is_infinite()); - assert!(!inf.is_finite()); - assert!(inf.is_sign_positive()); - assert!(!inf.is_sign_negative()); - assert!(!inf.is_nan()); - assert!(!inf.is_normal()); - assert_eq!(Fp::Infinite, inf.classify()); -} - -#[test] -fn test_neg_infinity() { - let neg_inf: f16 = f16::NEG_INFINITY; - assert!(neg_inf.is_infinite()); - assert!(!neg_inf.is_finite()); - assert!(!neg_inf.is_sign_positive()); - assert!(neg_inf.is_sign_negative()); - assert!(!neg_inf.is_nan()); - assert!(!neg_inf.is_normal()); - assert_eq!(Fp::Infinite, neg_inf.classify()); -} - -#[test] -fn test_zero() { - let zero: f16 = 0.0f16; - assert_biteq!(0.0, zero); - assert!(!zero.is_infinite()); - assert!(zero.is_finite()); - assert!(zero.is_sign_positive()); - assert!(!zero.is_sign_negative()); - assert!(!zero.is_nan()); - assert!(!zero.is_normal()); - assert_eq!(Fp::Zero, zero.classify()); -} - -#[test] -fn test_neg_zero() { - let neg_zero: f16 = -0.0; - assert_eq!(0.0, neg_zero); - assert_biteq!(-0.0, neg_zero); - assert!(!neg_zero.is_infinite()); - assert!(neg_zero.is_finite()); - assert!(!neg_zero.is_sign_positive()); - assert!(neg_zero.is_sign_negative()); - assert!(!neg_zero.is_nan()); - assert!(!neg_zero.is_normal()); - assert_eq!(Fp::Zero, neg_zero.classify()); -} - -#[test] -fn test_one() { - let one: f16 = 1.0f16; - assert_biteq!(1.0, one); - assert!(!one.is_infinite()); - assert!(one.is_finite()); - assert!(one.is_sign_positive()); - assert!(!one.is_sign_negative()); - assert!(!one.is_nan()); - assert!(one.is_normal()); - assert_eq!(Fp::Normal, one.classify()); -} - -#[test] -fn test_is_nan() { - let nan: f16 = f16::NAN; - let inf: f16 = f16::INFINITY; - let neg_inf: f16 = f16::NEG_INFINITY; - assert!(nan.is_nan()); - assert!(!0.0f16.is_nan()); - assert!(!5.3f16.is_nan()); - assert!(!(-10.732f16).is_nan()); - assert!(!inf.is_nan()); - assert!(!neg_inf.is_nan()); -} - -#[test] -fn test_is_infinite() { - let nan: f16 = f16::NAN; - let inf: f16 = f16::INFINITY; - let neg_inf: f16 = f16::NEG_INFINITY; - assert!(!nan.is_infinite()); - assert!(inf.is_infinite()); - assert!(neg_inf.is_infinite()); - assert!(!0.0f16.is_infinite()); - assert!(!42.8f16.is_infinite()); - assert!(!(-109.2f16).is_infinite()); -} - -#[test] -fn test_is_finite() { - let nan: f16 = f16::NAN; - let inf: f16 = f16::INFINITY; - let neg_inf: f16 = f16::NEG_INFINITY; - assert!(!nan.is_finite()); - assert!(!inf.is_finite()); - assert!(!neg_inf.is_finite()); - assert!(0.0f16.is_finite()); - assert!(42.8f16.is_finite()); - assert!((-109.2f16).is_finite()); -} - -#[test] -fn test_is_normal() { - let nan: f16 = f16::NAN; - let inf: f16 
= f16::INFINITY; - let neg_inf: f16 = f16::NEG_INFINITY; - let zero: f16 = 0.0f16; - let neg_zero: f16 = -0.0; - assert!(!nan.is_normal()); - assert!(!inf.is_normal()); - assert!(!neg_inf.is_normal()); - assert!(!zero.is_normal()); - assert!(!neg_zero.is_normal()); - assert!(1f16.is_normal()); - assert!(1e-4f16.is_normal()); - assert!(!1e-5f16.is_normal()); -} - -#[test] -fn test_classify() { - let nan: f16 = f16::NAN; - let inf: f16 = f16::INFINITY; - let neg_inf: f16 = f16::NEG_INFINITY; - let zero: f16 = 0.0f16; - let neg_zero: f16 = -0.0; - assert_eq!(nan.classify(), Fp::Nan); - assert_eq!(inf.classify(), Fp::Infinite); - assert_eq!(neg_inf.classify(), Fp::Infinite); - assert_eq!(zero.classify(), Fp::Zero); - assert_eq!(neg_zero.classify(), Fp::Zero); - assert_eq!(1f16.classify(), Fp::Normal); - assert_eq!(1e-4f16.classify(), Fp::Normal); - assert_eq!(1e-5f16.classify(), Fp::Subnormal); -} - #[test] #[cfg(any(miri, target_has_reliable_f16_math))] fn test_abs() { diff --git a/library/coretests/tests/floats/f32.rs b/library/coretests/tests/floats/f32.rs index f5d5723fea455..267b0e4e29434 100644 --- a/library/coretests/tests/floats/f32.rs +++ b/library/coretests/tests/floats/f32.rs @@ -1,6 +1,5 @@ use core::f32; use core::f32::consts; -use core::num::FpCategory as Fp; use super::{assert_approx_eq, assert_biteq}; @@ -30,148 +29,6 @@ const NAN_MASK2: u32 = 0x0055_5555; /// They serve as a way to get an idea of the real precision of floating point operations on different platforms. const APPROX_DELTA: f32 = if cfg!(miri) { 1e-4 } else { 1e-6 }; -#[test] -fn test_num_f32() { - super::test_num(10f32, 2f32); -} - -#[test] -fn test_infinity() { - let inf: f32 = f32::INFINITY; - assert!(inf.is_infinite()); - assert!(!inf.is_finite()); - assert!(inf.is_sign_positive()); - assert!(!inf.is_sign_negative()); - assert!(!inf.is_nan()); - assert!(!inf.is_normal()); - assert_eq!(Fp::Infinite, inf.classify()); -} - -#[test] -fn test_neg_infinity() { - let neg_inf: f32 = f32::NEG_INFINITY; - assert!(neg_inf.is_infinite()); - assert!(!neg_inf.is_finite()); - assert!(!neg_inf.is_sign_positive()); - assert!(neg_inf.is_sign_negative()); - assert!(!neg_inf.is_nan()); - assert!(!neg_inf.is_normal()); - assert_eq!(Fp::Infinite, neg_inf.classify()); -} - -#[test] -fn test_zero() { - let zero: f32 = 0.0f32; - assert_biteq!(0.0, zero); - assert!(!zero.is_infinite()); - assert!(zero.is_finite()); - assert!(zero.is_sign_positive()); - assert!(!zero.is_sign_negative()); - assert!(!zero.is_nan()); - assert!(!zero.is_normal()); - assert_eq!(Fp::Zero, zero.classify()); -} - -#[test] -fn test_neg_zero() { - let neg_zero: f32 = -0.0; - assert_eq!(0.0, neg_zero); - assert_biteq!(-0.0, neg_zero); - assert!(!neg_zero.is_infinite()); - assert!(neg_zero.is_finite()); - assert!(!neg_zero.is_sign_positive()); - assert!(neg_zero.is_sign_negative()); - assert!(!neg_zero.is_nan()); - assert!(!neg_zero.is_normal()); - assert_eq!(Fp::Zero, neg_zero.classify()); -} - -#[test] -fn test_one() { - let one: f32 = 1.0f32; - assert_biteq!(1.0, one); - assert!(!one.is_infinite()); - assert!(one.is_finite()); - assert!(one.is_sign_positive()); - assert!(!one.is_sign_negative()); - assert!(!one.is_nan()); - assert!(one.is_normal()); - assert_eq!(Fp::Normal, one.classify()); -} - -#[test] -fn test_is_nan() { - let nan: f32 = f32::NAN; - let inf: f32 = f32::INFINITY; - let neg_inf: f32 = f32::NEG_INFINITY; - assert!(nan.is_nan()); - assert!(!0.0f32.is_nan()); - assert!(!5.3f32.is_nan()); - assert!(!(-10.732f32).is_nan()); - assert!(!inf.is_nan()); 
- assert!(!neg_inf.is_nan()); -} - -#[test] -fn test_is_infinite() { - let nan: f32 = f32::NAN; - let inf: f32 = f32::INFINITY; - let neg_inf: f32 = f32::NEG_INFINITY; - assert!(!nan.is_infinite()); - assert!(inf.is_infinite()); - assert!(neg_inf.is_infinite()); - assert!(!0.0f32.is_infinite()); - assert!(!42.8f32.is_infinite()); - assert!(!(-109.2f32).is_infinite()); -} - -#[test] -fn test_is_finite() { - let nan: f32 = f32::NAN; - let inf: f32 = f32::INFINITY; - let neg_inf: f32 = f32::NEG_INFINITY; - assert!(!nan.is_finite()); - assert!(!inf.is_finite()); - assert!(!neg_inf.is_finite()); - assert!(0.0f32.is_finite()); - assert!(42.8f32.is_finite()); - assert!((-109.2f32).is_finite()); -} - -#[test] -fn test_is_normal() { - let nan: f32 = f32::NAN; - let inf: f32 = f32::INFINITY; - let neg_inf: f32 = f32::NEG_INFINITY; - let zero: f32 = 0.0f32; - let neg_zero: f32 = -0.0; - assert!(!nan.is_normal()); - assert!(!inf.is_normal()); - assert!(!neg_inf.is_normal()); - assert!(!zero.is_normal()); - assert!(!neg_zero.is_normal()); - assert!(1f32.is_normal()); - assert!(1e-37f32.is_normal()); - assert!(!1e-38f32.is_normal()); -} - -#[test] -fn test_classify() { - let nan: f32 = f32::NAN; - let inf: f32 = f32::INFINITY; - let neg_inf: f32 = f32::NEG_INFINITY; - let zero: f32 = 0.0f32; - let neg_zero: f32 = -0.0; - assert_eq!(nan.classify(), Fp::Nan); - assert_eq!(inf.classify(), Fp::Infinite); - assert_eq!(neg_inf.classify(), Fp::Infinite); - assert_eq!(zero.classify(), Fp::Zero); - assert_eq!(neg_zero.classify(), Fp::Zero); - assert_eq!(1f32.classify(), Fp::Normal); - assert_eq!(1e-37f32.classify(), Fp::Normal); - assert_eq!(1e-38f32.classify(), Fp::Subnormal); -} - #[test] fn test_abs() { assert_biteq!(f32::INFINITY.abs(), f32::INFINITY); diff --git a/library/coretests/tests/floats/f64.rs b/library/coretests/tests/floats/f64.rs index 34af87c241e91..735b7a7651519 100644 --- a/library/coretests/tests/floats/f64.rs +++ b/library/coretests/tests/floats/f64.rs @@ -1,6 +1,5 @@ use core::f64; use core::f64::consts; -use core::num::FpCategory as Fp; use super::{assert_approx_eq, assert_biteq}; @@ -25,147 +24,6 @@ const NAN_MASK1: u64 = 0x000a_aaaa_aaaa_aaaa; /// Second pattern over the mantissa const NAN_MASK2: u64 = 0x0005_5555_5555_5555; -#[test] -fn test_num_f64() { - super::test_num(10f64, 2f64); -} - -#[test] -fn test_infinity() { - let inf: f64 = f64::INFINITY; - assert!(inf.is_infinite()); - assert!(!inf.is_finite()); - assert!(inf.is_sign_positive()); - assert!(!inf.is_sign_negative()); - assert!(!inf.is_nan()); - assert!(!inf.is_normal()); - assert_eq!(Fp::Infinite, inf.classify()); -} - -#[test] -fn test_neg_infinity() { - let neg_inf: f64 = f64::NEG_INFINITY; - assert!(neg_inf.is_infinite()); - assert!(!neg_inf.is_finite()); - assert!(!neg_inf.is_sign_positive()); - assert!(neg_inf.is_sign_negative()); - assert!(!neg_inf.is_nan()); - assert!(!neg_inf.is_normal()); - assert_eq!(Fp::Infinite, neg_inf.classify()); -} - -#[test] -fn test_zero() { - let zero: f64 = 0.0f64; - assert_biteq!(0.0, zero); - assert!(!zero.is_infinite()); - assert!(zero.is_finite()); - assert!(zero.is_sign_positive()); - assert!(!zero.is_sign_negative()); - assert!(!zero.is_nan()); - assert!(!zero.is_normal()); - assert_eq!(Fp::Zero, zero.classify()); -} - -#[test] -fn test_neg_zero() { - let neg_zero: f64 = -0.0; - assert_eq!(0.0, neg_zero); - assert_biteq!(-0.0, neg_zero); - assert!(!neg_zero.is_infinite()); - assert!(neg_zero.is_finite()); - assert!(!neg_zero.is_sign_positive()); - 
assert!(neg_zero.is_sign_negative()); - assert!(!neg_zero.is_nan()); - assert!(!neg_zero.is_normal()); - assert_eq!(Fp::Zero, neg_zero.classify()); -} - -#[test] -fn test_one() { - let one: f64 = 1.0f64; - assert_biteq!(1.0, one); - assert!(!one.is_infinite()); - assert!(one.is_finite()); - assert!(one.is_sign_positive()); - assert!(!one.is_sign_negative()); - assert!(!one.is_nan()); - assert!(one.is_normal()); - assert_eq!(Fp::Normal, one.classify()); -} - -#[test] -fn test_is_nan() { - let nan: f64 = f64::NAN; - let inf: f64 = f64::INFINITY; - let neg_inf: f64 = f64::NEG_INFINITY; - assert!(nan.is_nan()); - assert!(!0.0f64.is_nan()); - assert!(!5.3f64.is_nan()); - assert!(!(-10.732f64).is_nan()); - assert!(!inf.is_nan()); - assert!(!neg_inf.is_nan()); -} - -#[test] -fn test_is_infinite() { - let nan: f64 = f64::NAN; - let inf: f64 = f64::INFINITY; - let neg_inf: f64 = f64::NEG_INFINITY; - assert!(!nan.is_infinite()); - assert!(inf.is_infinite()); - assert!(neg_inf.is_infinite()); - assert!(!0.0f64.is_infinite()); - assert!(!42.8f64.is_infinite()); - assert!(!(-109.2f64).is_infinite()); -} - -#[test] -fn test_is_finite() { - let nan: f64 = f64::NAN; - let inf: f64 = f64::INFINITY; - let neg_inf: f64 = f64::NEG_INFINITY; - assert!(!nan.is_finite()); - assert!(!inf.is_finite()); - assert!(!neg_inf.is_finite()); - assert!(0.0f64.is_finite()); - assert!(42.8f64.is_finite()); - assert!((-109.2f64).is_finite()); -} - -#[test] -fn test_is_normal() { - let nan: f64 = f64::NAN; - let inf: f64 = f64::INFINITY; - let neg_inf: f64 = f64::NEG_INFINITY; - let zero: f64 = 0.0f64; - let neg_zero: f64 = -0.0; - assert!(!nan.is_normal()); - assert!(!inf.is_normal()); - assert!(!neg_inf.is_normal()); - assert!(!zero.is_normal()); - assert!(!neg_zero.is_normal()); - assert!(1f64.is_normal()); - assert!(1e-307f64.is_normal()); - assert!(!1e-308f64.is_normal()); -} - -#[test] -fn test_classify() { - let nan: f64 = f64::NAN; - let inf: f64 = f64::INFINITY; - let neg_inf: f64 = f64::NEG_INFINITY; - let zero: f64 = 0.0f64; - let neg_zero: f64 = -0.0; - assert_eq!(nan.classify(), Fp::Nan); - assert_eq!(inf.classify(), Fp::Infinite); - assert_eq!(neg_inf.classify(), Fp::Infinite); - assert_eq!(zero.classify(), Fp::Zero); - assert_eq!(neg_zero.classify(), Fp::Zero); - assert_eq!(1e-307f64.classify(), Fp::Normal); - assert_eq!(1e-308f64.classify(), Fp::Subnormal); -} - #[test] fn test_abs() { assert_biteq!(f64::INFINITY.abs(), f64::INFINITY); diff --git a/library/coretests/tests/floats/mod.rs b/library/coretests/tests/floats/mod.rs index 36743a7d6df9e..43431bba6954b 100644 --- a/library/coretests/tests/floats/mod.rs +++ b/library/coretests/tests/floats/mod.rs @@ -1,28 +1,40 @@ -use std::fmt; use std::num::FpCategory as Fp; use std::ops::{Add, Div, Mul, Rem, Sub}; -/// Set the default tolerance for float comparison based on the type. -trait Approx { - const LIM: Self; +trait TestableFloat { + /// Set the default tolerance for float comparison based on the type. 
+ const APPROX: Self; + const MIN_POSITIVE_NORMAL: Self; + const MAX_SUBNORMAL: Self; } -impl Approx for f16 { - const LIM: Self = 1e-3; +impl TestableFloat for f16 { + const APPROX: Self = 1e-3; + const MIN_POSITIVE_NORMAL: Self = Self::MIN_POSITIVE; + const MAX_SUBNORMAL: Self = Self::MIN_POSITIVE.next_down(); } -impl Approx for f32 { - const LIM: Self = 1e-6; + +impl TestableFloat for f32 { + const APPROX: Self = 1e-6; + const MIN_POSITIVE_NORMAL: Self = Self::MIN_POSITIVE; + const MAX_SUBNORMAL: Self = Self::MIN_POSITIVE.next_down(); } -impl Approx for f64 { - const LIM: Self = 1e-6; + +impl TestableFloat for f64 { + const APPROX: Self = 1e-6; + const MIN_POSITIVE_NORMAL: Self = Self::MIN_POSITIVE; + const MAX_SUBNORMAL: Self = Self::MIN_POSITIVE.next_down(); } -impl Approx for f128 { - const LIM: Self = 1e-9; + +impl TestableFloat for f128 { + const APPROX: Self = 1e-9; + const MIN_POSITIVE_NORMAL: Self = Self::MIN_POSITIVE; + const MAX_SUBNORMAL: Self = Self::MIN_POSITIVE.next_down(); } /// Determine the tolerance for values of the argument type. -const fn lim_for_ty(_x: T) -> T { - T::LIM +const fn lim_for_ty(_x: T) -> T { + T::APPROX } // We have runtime ("rt") and const versions of these macros. @@ -187,9 +199,11 @@ macro_rules! float_test { $( $( #[$const_meta] )+ )? mod const_ { #[allow(unused)] - use super::Approx; + use super::TestableFloat; #[allow(unused)] use std::num::FpCategory as Fp; + #[allow(unused)] + use std::ops::{Add, Div, Mul, Rem, Sub}; // Shadow the runtime versions of the macro with const-compatible versions. #[allow(unused)] use $crate::floats::{ @@ -229,30 +243,42 @@ macro_rules! float_test { }; } -/// Helper function for testing numeric operations -pub fn test_num(ten: T, two: T) -where - T: PartialEq - + Add - + Sub - + Mul - + Div - + Rem - + fmt::Debug - + Copy, -{ - assert_eq!(ten.add(two), ten + two); - assert_eq!(ten.sub(two), ten - two); - assert_eq!(ten.mul(two), ten * two); - assert_eq!(ten.div(two), ten / two); - assert_eq!(ten.rem(two), ten % two); -} - mod f128; mod f16; mod f32; mod f64; +float_test! { + name: num, + attrs: { + f16: #[cfg(any(miri, target_has_reliable_f16))], + f128: #[cfg(any(miri, target_has_reliable_f128))], + }, + test { + let two: Float = 2.0; + let ten: Float = 10.0; + assert_biteq!(ten.add(two), ten + two); + assert_biteq!(ten.sub(two), ten - two); + assert_biteq!(ten.mul(two), ten * two); + assert_biteq!(ten.div(two), ten / two); + } +} + +// FIXME(f16_f128): merge into `num` once the required `fmodl`/`fmodf128` function is available on +// all platforms. +float_test! { + name: num_rem, + attrs: { + f16: #[cfg(any(miri, target_has_reliable_f16_math))], + f128: #[cfg(any(miri, target_has_reliable_f128_math))], + }, + test { + let two: Float = 2.0; + let ten: Float = 10.0; + assert_biteq!(ten.rem(two), ten % two); + } +} + float_test! { name: nan, attrs: { @@ -273,6 +299,213 @@ float_test! { } } +float_test! { + name: infinity, + attrs: { + f16: #[cfg(any(miri, target_has_reliable_f16))], + f128: #[cfg(any(miri, target_has_reliable_f128))], + }, + test { + let inf: Float = Float::INFINITY; + assert!(inf.is_infinite()); + assert!(!inf.is_finite()); + assert!(inf.is_sign_positive()); + assert!(!inf.is_sign_negative()); + assert!(!inf.is_nan()); + assert!(!inf.is_normal()); + assert!(matches!(inf.classify(), Fp::Infinite)); + } +} + +float_test! 
{ + name: neg_infinity, + attrs: { + f16: #[cfg(any(miri, target_has_reliable_f16))], + f128: #[cfg(any(miri, target_has_reliable_f128))], + }, + test { + let neg_inf: Float = Float::NEG_INFINITY; + assert!(neg_inf.is_infinite()); + assert!(!neg_inf.is_finite()); + assert!(!neg_inf.is_sign_positive()); + assert!(neg_inf.is_sign_negative()); + assert!(!neg_inf.is_nan()); + assert!(!neg_inf.is_normal()); + assert!(matches!(neg_inf.classify(), Fp::Infinite)); + } +} + +float_test! { + name: zero, + attrs: { + f16: #[cfg(any(miri, target_has_reliable_f16))], + f128: #[cfg(any(miri, target_has_reliable_f128))], + }, + test { + let zero: Float = 0.0; + assert_biteq!(0.0, zero); + assert!(!zero.is_infinite()); + assert!(zero.is_finite()); + assert!(zero.is_sign_positive()); + assert!(!zero.is_sign_negative()); + assert!(!zero.is_nan()); + assert!(!zero.is_normal()); + assert!(matches!(zero.classify(), Fp::Zero)); + } +} + +float_test! { + name: neg_zero, + attrs: { + f16: #[cfg(any(miri, target_has_reliable_f16))], + f128: #[cfg(any(miri, target_has_reliable_f128))], + }, + test { + let neg_zero: Float = -0.0; + assert!(0.0 == neg_zero); + assert_biteq!(-0.0, neg_zero); + assert!(!neg_zero.is_infinite()); + assert!(neg_zero.is_finite()); + assert!(!neg_zero.is_sign_positive()); + assert!(neg_zero.is_sign_negative()); + assert!(!neg_zero.is_nan()); + assert!(!neg_zero.is_normal()); + assert!(matches!(neg_zero.classify(), Fp::Zero)); + } +} + +float_test! { + name: one, + attrs: { + f16: #[cfg(any(miri, target_has_reliable_f16))], + f128: #[cfg(any(miri, target_has_reliable_f128))], + }, + test { + let one: Float = 1.0; + assert_biteq!(1.0, one); + assert!(!one.is_infinite()); + assert!(one.is_finite()); + assert!(one.is_sign_positive()); + assert!(!one.is_sign_negative()); + assert!(!one.is_nan()); + assert!(one.is_normal()); + assert!(matches!(one.classify(), Fp::Normal)); + } +} + +float_test! { + name: is_nan, + attrs: { + f16: #[cfg(any(miri, target_has_reliable_f16))], + f128: #[cfg(any(miri, target_has_reliable_f128))], + }, + test { + let nan: Float = Float::NAN; + let inf: Float = Float::INFINITY; + let neg_inf: Float = Float::NEG_INFINITY; + let zero: Float = 0.0; + let pos: Float = 5.3; + let neg: Float = -10.732; + assert!(nan.is_nan()); + assert!(!zero.is_nan()); + assert!(!pos.is_nan()); + assert!(!neg.is_nan()); + assert!(!inf.is_nan()); + assert!(!neg_inf.is_nan()); + } +} + +float_test! { + name: is_infinite, + attrs: { + f16: #[cfg(any(miri, target_has_reliable_f16))], + f128: #[cfg(any(miri, target_has_reliable_f128))], + }, + test { + let nan: Float = Float::NAN; + let inf: Float = Float::INFINITY; + let neg_inf: Float = Float::NEG_INFINITY; + let zero: Float = 0.0; + let pos: Float = 42.8; + let neg: Float = -109.2; + assert!(!nan.is_infinite()); + assert!(inf.is_infinite()); + assert!(neg_inf.is_infinite()); + assert!(!zero.is_infinite()); + assert!(!pos.is_infinite()); + assert!(!neg.is_infinite()); + } +} + +float_test! { + name: is_finite, + attrs: { + f16: #[cfg(any(miri, target_has_reliable_f16))], + f128: #[cfg(any(miri, target_has_reliable_f128))], + }, + test { + let nan: Float = Float::NAN; + let inf: Float = Float::INFINITY; + let neg_inf: Float = Float::NEG_INFINITY; + let zero: Float = 0.0; + let pos: Float = 42.8; + let neg: Float = -109.2; + assert!(!nan.is_finite()); + assert!(!inf.is_finite()); + assert!(!neg_inf.is_finite()); + assert!(zero.is_finite()); + assert!(pos.is_finite()); + assert!(neg.is_finite()); + } +} + +float_test! 
{ + name: is_normal, + attrs: { + f16: #[cfg(any(miri, target_has_reliable_f16))], + f128: #[cfg(any(miri, target_has_reliable_f128))], + }, + test { + let nan: Float = Float::NAN; + let inf: Float = Float::INFINITY; + let neg_inf: Float = Float::NEG_INFINITY; + let zero: Float = 0.0; + let neg_zero: Float = -0.0; + let one : Float = 1.0; + assert!(!nan.is_normal()); + assert!(!inf.is_normal()); + assert!(!neg_inf.is_normal()); + assert!(!zero.is_normal()); + assert!(!neg_zero.is_normal()); + assert!(one.is_normal()); + assert!(Float::MIN_POSITIVE_NORMAL.is_normal()); + assert!(!Float::MAX_SUBNORMAL.is_normal()); + } +} + +float_test! { + name: classify, + attrs: { + f16: #[cfg(any(miri, target_has_reliable_f16))], + }, + test { + let nan: Float = Float::NAN; + let inf: Float = Float::INFINITY; + let neg_inf: Float = Float::NEG_INFINITY; + let zero: Float = 0.0; + let neg_zero: Float = -0.0; + let one: Float = 1.0; + assert!(matches!(nan.classify(), Fp::Nan)); + assert!(matches!(inf.classify(), Fp::Infinite)); + assert!(matches!(neg_inf.classify(), Fp::Infinite)); + assert!(matches!(zero.classify(), Fp::Zero)); + assert!(matches!(neg_zero.classify(), Fp::Zero)); + assert!(matches!(one.classify(), Fp::Normal)); + assert!(matches!(Float::MIN_POSITIVE_NORMAL.classify(), Fp::Normal)); + assert!(matches!(Float::MAX_SUBNORMAL.classify(), Fp::Subnormal)); + } +} + float_test! { name: min, attrs: { diff --git a/library/coretests/tests/io/borrowed_buf.rs b/library/coretests/tests/io/borrowed_buf.rs index fbd3864dcac14..4074148436cf6 100644 --- a/library/coretests/tests/io/borrowed_buf.rs +++ b/library/coretests/tests/io/borrowed_buf.rs @@ -61,7 +61,7 @@ fn clear() { assert_eq!(rbuf.filled().len(), 0); assert_eq!(rbuf.unfilled().capacity(), 16); - assert_eq!(rbuf.unfilled().init_ref(), [255; 16]); + assert_eq!(rbuf.unfilled().init_mut(), [255; 16]); } #[test] @@ -124,7 +124,7 @@ fn reborrow_written() { assert_eq!(cursor2.written(), 32); assert_eq!(cursor.written(), 32); - assert_eq!(buf.unfilled().written(), 0); + assert_eq!(buf.unfilled().written(), 32); assert_eq!(buf.init_len(), 32); assert_eq!(buf.filled().len(), 32); let filled = buf.filled(); @@ -142,9 +142,7 @@ fn cursor_set_init() { } assert_eq!(rbuf.init_len(), 8); - assert_eq!(rbuf.unfilled().init_ref().len(), 8); assert_eq!(rbuf.unfilled().init_mut().len(), 8); - assert_eq!(rbuf.unfilled().uninit_mut().len(), 8); assert_eq!(unsafe { rbuf.unfilled().as_mut().len() }, 16); rbuf.unfilled().advance(4); @@ -160,8 +158,42 @@ fn cursor_set_init() { } assert_eq!(rbuf.init_len(), 12); - assert_eq!(rbuf.unfilled().init_ref().len(), 8); assert_eq!(rbuf.unfilled().init_mut().len(), 8); - assert_eq!(rbuf.unfilled().uninit_mut().len(), 4); assert_eq!(unsafe { rbuf.unfilled().as_mut().len() }, 12); } + +#[test] +fn cursor_with_unfilled_buf() { + let buf: &mut [_] = &mut [MaybeUninit::uninit(); 16]; + let mut rbuf = BorrowedBuf::from(buf); + let mut cursor = rbuf.unfilled(); + + cursor.with_unfilled_buf(|buf| { + buf.unfilled().append(&[1, 2, 3]); + assert_eq!(buf.filled(), &[1, 2, 3]); + }); + + assert_eq!(cursor.init_mut().len(), 0); + assert_eq!(cursor.written(), 3); + + cursor.with_unfilled_buf(|buf| { + assert_eq!(buf.capacity(), 13); + assert_eq!(buf.init_len(), 0); + + buf.unfilled().ensure_init(); + buf.unfilled().advance(4); + }); + + assert_eq!(cursor.init_mut().len(), 9); + assert_eq!(cursor.written(), 7); + + cursor.with_unfilled_buf(|buf| { + assert_eq!(buf.capacity(), 9); + assert_eq!(buf.init_len(), 9); + }); + + 
assert_eq!(cursor.init_mut().len(), 9); + assert_eq!(cursor.written(), 7); + + assert_eq!(rbuf.filled(), &[1, 2, 3, 0, 0, 0, 0]); +} diff --git a/library/coretests/tests/iter/adapters/cloned.rs b/library/coretests/tests/iter/adapters/cloned.rs index 78babb7feab18..0a3dd1ce46e81 100644 --- a/library/coretests/tests/iter/adapters/cloned.rs +++ b/library/coretests/tests/iter/adapters/cloned.rs @@ -31,7 +31,8 @@ fn test_cloned_side_effects() { .zip(&[1]); for _ in iter {} } - assert_eq!(count, 2); + // Zip documentation provides some leeway about side-effects + assert!([1, 2].iter().any(|v| *v == count)); } #[test] diff --git a/library/coretests/tests/iter/adapters/zip.rs b/library/coretests/tests/iter/adapters/zip.rs index 70392dca0fa17..063e226a61ce0 100644 --- a/library/coretests/tests/iter/adapters/zip.rs +++ b/library/coretests/tests/iter/adapters/zip.rs @@ -107,9 +107,19 @@ fn test_zip_next_back_side_effects_exhausted() { iter.next(); iter.next(); iter.next(); - iter.next(); + assert_eq!(iter.next(), None); assert_eq!(iter.next_back(), None); - assert_eq!(a, vec![1, 2, 3, 4, 6, 5]); + + assert!(a.starts_with(&[1, 2, 3])); + let a_len = a.len(); + // Tail-side-effects of forward-iteration are "at most one" per next(). + // And for reverse iteration we don't guarantee much either. + // But we can put some bounds on the possible behaviors. + assert!(a_len <= 6); + assert!(a_len >= 3); + a.sort(); + assert_eq!(a, &[1, 2, 3, 4, 5, 6][..a.len()]); + assert_eq!(b, vec![200, 300, 400]); } @@ -120,7 +130,8 @@ fn test_zip_cloned_sideffectful() { for _ in xs.iter().cloned().zip(ys.iter().cloned()) {} - assert_eq!(&xs, &[1, 1, 1, 0][..]); + // Zip documentation permits either case. + assert!([&[1, 1, 1, 0], &[1, 1, 0, 0]].iter().any(|v| &xs == *v)); assert_eq!(&ys, &[1, 1][..]); let xs = [CountClone::new(), CountClone::new()]; @@ -139,7 +150,8 @@ fn test_zip_map_sideffectful() { for _ in xs.iter_mut().map(|x| *x += 1).zip(ys.iter_mut().map(|y| *y += 1)) {} - assert_eq!(&xs, &[1, 1, 1, 1, 1, 0]); + // Zip documentation permits either case. + assert!([&[1, 1, 1, 1, 1, 0], &[1, 1, 1, 1, 0, 0]].iter().any(|v| &xs == *v)); assert_eq!(&ys, &[1, 1, 1, 1]); let mut xs = [0; 4]; @@ -168,7 +180,8 @@ fn test_zip_map_rev_sideffectful() { { let mut it = xs.iter_mut().map(|x| *x += 1).zip(ys.iter_mut().map(|y| *y += 1)); - (&mut it).take(5).count(); + // the current impl only trims the tails if the iterator isn't exhausted + (&mut it).take(3).count(); it.next_back(); } assert_eq!(&xs, &[1, 1, 1, 1, 1, 1]); @@ -211,9 +224,18 @@ fn test_zip_nth_back_side_effects_exhausted() { iter.next(); iter.next(); iter.next(); - iter.next(); + assert_eq!(iter.next(), None); assert_eq!(iter.nth_back(0), None); - assert_eq!(a, vec![1, 2, 3, 4, 6, 5]); + assert!(a.starts_with(&[1, 2, 3])); + let a_len = a.len(); + // Tail-side-effects of forward-iteration are "at most one" per next(). + // And for reverse iteration we don't guarantee much either. + // But we can put some bounds on the possible behaviors. 
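The relaxed zip/cloned assertions above reflect that `Zip` only bounds the side effects observed on the longer iterator rather than guaranteeing an exact count. A standalone illustration; the bounds asserted here are the same ones the tests rely on, not a stronger guarantee:

```rust
fn main() {
    let mut count = 0;
    {
        // The first iterator is longer than the second; how many of its
        // elements get "touched" before Zip notices the short side is
        // exhausted is implementation-dependent.
        let iter = [1, 2, 3].iter().inspect(|_| count += 1).zip(&[10]);
        for _ in iter {}
    }
    // Documented leeway: at least the one yielded element, at most one extra.
    assert!(count == 1 || count == 2, "unexpected side-effect count: {count}");
}
```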
+ assert!(a_len <= 6); + assert!(a_len >= 3); + a.sort(); + assert_eq!(a, &[1, 2, 3, 4, 5, 6][..a.len()]); + assert_eq!(b, vec![200, 300, 400]); } @@ -237,32 +259,6 @@ fn test_zip_trusted_random_access_composition() { assert_eq!(z2.next().unwrap(), ((1, 1), 1)); } -#[test] -#[cfg(panic = "unwind")] -fn test_zip_trusted_random_access_next_back_drop() { - use std::panic::{AssertUnwindSafe, catch_unwind}; - - let mut counter = 0; - - let it = [42].iter().map(|e| { - let c = counter; - counter += 1; - if c == 0 { - panic!("bomb"); - } - - e - }); - let it2 = [(); 0].iter(); - let mut zip = it.zip(it2); - catch_unwind(AssertUnwindSafe(|| { - zip.next_back(); - })) - .unwrap_err(); - assert!(zip.next().is_none()); - assert_eq!(counter, 1); -} - #[test] fn test_double_ended_zip() { let xs = [1, 2, 3, 4, 5, 6]; @@ -275,6 +271,63 @@ fn test_double_ended_zip() { assert_eq!(it.next(), None); } +#[test] +#[cfg(panic = "unwind")] +/// Regression test for #137255 +/// A previous implementation of Zip TrustedRandomAccess specializations tried to do a lot of work +/// to preserve side-effects of equalizing the iterator lengths during backwards iteration. +/// This lead to several cases of unsoundness, twice due to being left in an inconsistent state +/// after panics. +/// The new implementation does not try as hard, but we still need panic-safety. +fn test_nested_zip_panic_safety() { + use std::panic::{AssertUnwindSafe, catch_unwind, resume_unwind}; + use std::sync::atomic::{AtomicUsize, Ordering}; + + let mut panic = true; + // keeps track of how often element get visited, must be at most once each + let witness = [8, 9, 10, 11, 12].map(|i| (i, AtomicUsize::new(0))); + let a = witness.as_slice().iter().map(|e| { + e.1.fetch_add(1, Ordering::Relaxed); + if panic { + panic = false; + resume_unwind(Box::new(())) + } + e.0 + }); + // shorter than `a`, so `a` will get trimmed + let b = [1, 2, 3, 4].as_slice().iter().copied(); + // shorter still, so `ab` will get trimmed.` + let c = [5, 6, 7].as_slice().iter().copied(); + + // This will panic during backwards trimming. + let ab = zip(a, b); + // This being Zip + TrustedRandomAccess means it will only call `next_back`` + // during trimming and otherwise do calls `__iterator_get_unchecked` on `ab`. + let mut abc = zip(ab, c); + + assert_eq!(abc.len(), 3); + // This will first trigger backwards trimming before it would normally obtain the + // actual element if it weren't for the panic. + // This used to corrupt the internal state of `abc`, which then lead to + // TrustedRandomAccess safety contract violations in calls to `ab`, + // which ultimately lead to UB. + catch_unwind(AssertUnwindSafe(|| abc.next_back())).ok(); + // check for sane outward behavior after the panic, which indicates a sane internal state. + // Technically these outcomes are not required because a panic frees us from correctness obligations. + assert_eq!(abc.len(), 2); + assert_eq!(abc.next(), Some(((8, 1), 5))); + assert_eq!(abc.next_back(), Some(((9, 2), 6))); + for (i, (_, w)) in witness.iter().enumerate() { + let v = w.load(Ordering::Relaxed); + // required by TRA contract + assert!(v <= 1, "expected idx {i} to be visited at most once, actual: {v}"); + } + // Trimming panicked and should only run once, so this one won't be visited. + // Implementation detail, but not trying to run it again is what keeps + // things simple. 
+ assert_eq!(witness[3].1.load(Ordering::Relaxed), 0); +} + #[test] fn test_issue_82282() { fn overflowed_zip(arr: &[i32]) -> impl Iterator { diff --git a/library/coretests/tests/lib.rs b/library/coretests/tests/lib.rs index fdef736c0c0f7..4cfac9ecc2ab7 100644 --- a/library/coretests/tests/lib.rs +++ b/library/coretests/tests/lib.rs @@ -19,7 +19,7 @@ #![feature(const_deref)] #![feature(const_destruct)] #![feature(const_eval_select)] -#![feature(const_float_round_methods)] +#![feature(const_ops)] #![feature(const_ref_cell)] #![feature(const_trait_impl)] #![feature(core_float_math)] diff --git a/library/proc_macro/Cargo.toml b/library/proc_macro/Cargo.toml index 8ea92088a84ab..0042a6e8ece58 100644 --- a/library/proc_macro/Cargo.toml +++ b/library/proc_macro/Cargo.toml @@ -9,7 +9,7 @@ std = { path = "../std" } # `core` when resolving doc links. Without this line a different `core` will be # loaded from sysroot causing duplicate lang items and other similar errors. core = { path = "../core" } -rustc-literal-escaper = { version = "0.0.4", features = ["rustc-dep-of-std"] } +rustc-literal-escaper = { version = "0.0.5", features = ["rustc-dep-of-std"] } [features] default = ["rustc-dep-of-std"] diff --git a/library/proc_macro/src/lib.rs b/library/proc_macro/src/lib.rs index ff326342989bc..162b4fdcc8ae2 100644 --- a/library/proc_macro/src/lib.rs +++ b/library/proc_macro/src/lib.rs @@ -1471,11 +1471,11 @@ impl Literal { let mut error = None; let mut buf = Vec::with_capacity(symbol.len()); - unescape_c_str(symbol, |_span, c| match c { + unescape_c_str(symbol, |_span, res| match res { Ok(MixedUnit::Char(c)) => { - buf.extend_from_slice(c.encode_utf8(&mut [0; 4]).as_bytes()) + buf.extend_from_slice(c.get().encode_utf8(&mut [0; 4]).as_bytes()) } - Ok(MixedUnit::HighByte(b)) => buf.push(b), + Ok(MixedUnit::HighByte(b)) => buf.push(b.get()), Err(err) => { if err.is_fatal() { error = Some(ConversionErrorKind::FailedToUnescape(err)); diff --git a/library/std/Cargo.toml b/library/std/Cargo.toml index 7a20d8bc919e7..cde8654e3a0de 100644 --- a/library/std/Cargo.toml +++ b/library/std/Cargo.toml @@ -23,7 +23,7 @@ unwind = { path = "../unwind" } hashbrown = { version = "0.15", default-features = false, features = [ 'rustc-dep-of-std', ] } -std_detect = { path = "../stdarch/crates/std_detect", default-features = false, features = [ +std_detect = { path = "../stdarch/crates/std_detect", public = true, default-features = false, features = [ 'rustc-dep-of-std', ] } safety = { path = "../contracts/safety" } @@ -93,6 +93,9 @@ backtrace = [ 'object/rustc-dep-of-std', 'miniz_oxide/rustc-dep-of-std', ] +# Disable symbolization in backtraces. For use with -Zbuild-std. +# FIXME: Ideally this should be an additive backtrace-symbolization feature +backtrace-trace-only = [] panic-unwind = ["dep:panic_unwind"] compiler-builtins-c = ["alloc/compiler-builtins-c"] diff --git a/library/std/src/io/mod.rs b/library/std/src/io/mod.rs index 17c32d7a571c8..d351ee5e739d3 100644 --- a/library/std/src/io/mod.rs +++ b/library/std/src/io/mod.rs @@ -489,7 +489,7 @@ pub(crate) fn default_read_to_end( } }; - let unfilled_but_initialized = cursor.init_ref().len(); + let unfilled_but_initialized = cursor.init_mut().len(); let bytes_read = cursor.written(); let was_fully_initialized = read_buf.init_len() == buf_len; @@ -3053,7 +3053,7 @@ impl Read for Take { // The condition above guarantees that `self.limit` fits in `usize`. 
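// Aside: an illustrative sketch (not from the patch) of the `BorrowedBuf` cursor API
// that the io hunks above adjust, assuming the unstable `core_io_borrowed_buf` and
// `read_buf` features. `unfilled()` yields a cursor, `written()` counts the bytes the
// reader filled, and `init_mut()` exposes the initialized-but-unfilled tail.
#![feature(core_io_borrowed_buf, read_buf)]
use std::io::{BorrowedBuf, Read};
use std::mem::MaybeUninit;

fn read_some<R: Read>(mut r: R) -> std::io::Result<usize> {
    let mut storage = [MaybeUninit::<u8>::uninit(); 64];
    let mut buf = BorrowedBuf::from(&mut storage[..]);
    let mut cursor = buf.unfilled();
    r.read_buf(cursor.reborrow())?; // fills the front of the buffer
    Ok(cursor.written())            // number of bytes the reader produced
}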
let limit = self.limit as usize; - let extra_init = cmp::min(limit, buf.init_ref().len()); + let extra_init = cmp::min(limit, buf.init_mut().len()); // SAFETY: no uninit data is written to ibuf let ibuf = unsafe { &mut buf.as_mut()[..limit] }; @@ -3068,7 +3068,7 @@ impl Read for Take { let mut cursor = sliced_buf.unfilled(); let result = self.inner.read_buf(cursor.reborrow()); - let new_init = cursor.init_ref().len(); + let new_init = cursor.init_mut().len(); let filled = sliced_buf.len(); // cursor / sliced_buf / ibuf must drop here diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs index d8a60b6ae970a..e5b3c2f34cc48 100644 --- a/library/std/src/lib.rs +++ b/library/std/src/lib.rs @@ -330,7 +330,6 @@ #![feature(bstr_internals)] #![feature(char_internals)] #![feature(clone_to_uninit)] -#![feature(const_float_round_methods)] #![feature(core_intrinsics)] #![feature(core_io_borrowed_buf)] #![feature(duration_constants)] diff --git a/library/std/src/num/f32.rs b/library/std/src/num/f32.rs index e79ec2ae966ff..2bff73add33d6 100644 --- a/library/std/src/num/f32.rs +++ b/library/std/src/num/f32.rs @@ -44,7 +44,7 @@ impl f32 { #[rustc_allow_incoherent_impl] #[must_use = "method returns a new number and does not mutate the original value"] #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] + #[rustc_const_stable(feature = "const_float_round_methods", since = "CURRENT_RUSTC_VERSION")] #[inline] pub const fn floor(self) -> f32 { core::f32::math::floor(self) @@ -67,7 +67,7 @@ impl f32 { #[rustc_allow_incoherent_impl] #[must_use = "method returns a new number and does not mutate the original value"] #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] + #[rustc_const_stable(feature = "const_float_round_methods", since = "CURRENT_RUSTC_VERSION")] #[inline] pub const fn ceil(self) -> f32 { core::f32::math::ceil(self) @@ -96,7 +96,7 @@ impl f32 { #[rustc_allow_incoherent_impl] #[must_use = "method returns a new number and does not mutate the original value"] #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] + #[rustc_const_stable(feature = "const_float_round_methods", since = "CURRENT_RUSTC_VERSION")] #[inline] pub const fn round(self) -> f32 { core::f32::math::round(self) @@ -123,7 +123,7 @@ impl f32 { #[rustc_allow_incoherent_impl] #[must_use = "method returns a new number and does not mutate the original value"] #[stable(feature = "round_ties_even", since = "1.77.0")] - #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] + #[rustc_const_stable(feature = "const_float_round_methods", since = "CURRENT_RUSTC_VERSION")] #[inline] pub const fn round_ties_even(self) -> f32 { core::f32::math::round_ties_even(self) @@ -149,7 +149,7 @@ impl f32 { #[rustc_allow_incoherent_impl] #[must_use = "method returns a new number and does not mutate the original value"] #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] + #[rustc_const_stable(feature = "const_float_round_methods", since = "CURRENT_RUSTC_VERSION")] #[inline] pub const fn trunc(self) -> f32 { core::f32::math::trunc(self) @@ -173,7 +173,7 @@ impl f32 { #[rustc_allow_incoherent_impl] #[must_use = "method returns a new number and does not mutate the original value"] #[stable(feature = "rust1", since = "1.0.0")] - 
#[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] + #[rustc_const_stable(feature = "const_float_round_methods", since = "CURRENT_RUSTC_VERSION")] #[inline] pub const fn fract(self) -> f32 { core::f32::math::fract(self) diff --git a/library/std/src/num/f64.rs b/library/std/src/num/f64.rs index 853417825f97a..b71e319f4074f 100644 --- a/library/std/src/num/f64.rs +++ b/library/std/src/num/f64.rs @@ -44,7 +44,7 @@ impl f64 { #[rustc_allow_incoherent_impl] #[must_use = "method returns a new number and does not mutate the original value"] #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] + #[rustc_const_stable(feature = "const_float_round_methods", since = "CURRENT_RUSTC_VERSION")] #[inline] pub const fn floor(self) -> f64 { core::f64::math::floor(self) @@ -67,7 +67,7 @@ impl f64 { #[rustc_allow_incoherent_impl] #[must_use = "method returns a new number and does not mutate the original value"] #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] + #[rustc_const_stable(feature = "const_float_round_methods", since = "CURRENT_RUSTC_VERSION")] #[inline] pub const fn ceil(self) -> f64 { core::f64::math::ceil(self) @@ -96,7 +96,7 @@ impl f64 { #[rustc_allow_incoherent_impl] #[must_use = "method returns a new number and does not mutate the original value"] #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] + #[rustc_const_stable(feature = "const_float_round_methods", since = "CURRENT_RUSTC_VERSION")] #[inline] pub const fn round(self) -> f64 { core::f64::math::round(self) @@ -123,7 +123,7 @@ impl f64 { #[rustc_allow_incoherent_impl] #[must_use = "method returns a new number and does not mutate the original value"] #[stable(feature = "round_ties_even", since = "1.77.0")] - #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] + #[rustc_const_stable(feature = "const_float_round_methods", since = "CURRENT_RUSTC_VERSION")] #[inline] pub const fn round_ties_even(self) -> f64 { core::f64::math::round_ties_even(self) @@ -149,7 +149,7 @@ impl f64 { #[rustc_allow_incoherent_impl] #[must_use = "method returns a new number and does not mutate the original value"] #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] + #[rustc_const_stable(feature = "const_float_round_methods", since = "CURRENT_RUSTC_VERSION")] #[inline] pub const fn trunc(self) -> f64 { core::f64::math::trunc(self) @@ -173,7 +173,7 @@ impl f64 { #[rustc_allow_incoherent_impl] #[must_use = "method returns a new number and does not mutate the original value"] #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] + #[rustc_const_stable(feature = "const_float_round_methods", since = "CURRENT_RUSTC_VERSION")] #[inline] pub const fn fract(self) -> f64 { core::f64::math::fract(self) diff --git a/library/std/src/prelude/v1.rs b/library/std/src/prelude/v1.rs index 69f0335315356..70c1113155656 100644 --- a/library/std/src/prelude/v1.rs +++ b/library/std/src/prelude/v1.rs @@ -67,7 +67,7 @@ pub use core::prelude::v1::{ alloc_error_handler, bench, derive, global_allocator, test, test_case, }; -#[unstable(feature = "derive_const", issue = "none")] +#[unstable(feature = "derive_const", issue = "118304")] pub use 
core::prelude::v1::derive_const; // Do not `doc(no_inline)` either. diff --git a/library/std/src/random.rs b/library/std/src/random.rs index e7d4ab81df0ac..3994c5cfaf6f4 100644 --- a/library/std/src/random.rs +++ b/library/std/src/random.rs @@ -1,7 +1,4 @@ //! Random value generation. -//! -//! The [`Random`] trait allows generating a random value for a type using a -//! given [`RandomSource`]. #[unstable(feature = "random", issue = "130703")] pub use core::random::*; @@ -68,18 +65,11 @@ impl RandomSource for DefaultRandomSource { } } -/// Generates a random value with the default random source. +/// Generates a random value from a distribution, using the default random source. /// -/// This is a convenience function for `T::random(&mut DefaultRandomSource)` and -/// will sample according to the same distribution as the underlying [`Random`] -/// trait implementation. See [`DefaultRandomSource`] for more information about -/// how randomness is sourced. -/// -/// **Warning:** Be careful when manipulating random values! The -/// [`random`](Random::random) method on integers samples them with a uniform -/// distribution, so a value of 1 is just as likely as [`i32::MAX`]. By using -/// modulo operations, some of the resulting values can become more likely than -/// others. Use audited crates when in doubt. +/// This is a convenience function for `dist.sample(&mut DefaultRandomSource)` and will sample +/// according to the same distribution as the underlying [`Distribution`] trait implementation. See +/// [`DefaultRandomSource`] for more information about how randomness is sourced. /// /// # Examples /// @@ -89,7 +79,7 @@ impl RandomSource for DefaultRandomSource { /// /// use std::random::random; /// -/// let bits: u128 = random(); +/// let bits: u128 = random(..); /// let g1 = (bits >> 96) as u32; /// let g2 = (bits >> 80) as u16; /// let g3 = (0x4000 | (bits >> 64) & 0x0fff) as u16; @@ -101,6 +91,6 @@ impl RandomSource for DefaultRandomSource { /// /// [version 4/variant 1 UUID]: https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_(random) #[unstable(feature = "random", issue = "130703")] -pub fn random() -> T { - T::random(&mut DefaultRandomSource) +pub fn random(dist: impl Distribution) -> T { + dist.sample(&mut DefaultRandomSource) } diff --git a/library/std/src/sync/poison.rs b/library/std/src/sync/poison.rs index 571f0d14248e1..0c05f152ef84c 100644 --- a/library/std/src/sync/poison.rs +++ b/library/std/src/sync/poison.rs @@ -10,7 +10,7 @@ //! (some invariant is not being upheld). //! //! The specifics of how this "poisoned" state affects other threads -//! depend on the primitive. See [#Overview] bellow. +//! depend on the primitive. See [#Overview] below. //! //! For the alternative implementations that do not employ poisoning, //! see `std::sync::nonpoisoning`. diff --git a/library/std/src/sys/backtrace.rs b/library/std/src/sys/backtrace.rs index efa6a896dad8f..272d0fa4d1a19 100644 --- a/library/std/src/sys/backtrace.rs +++ b/library/std/src/sys/backtrace.rs @@ -68,61 +68,67 @@ unsafe fn _print_fmt(fmt: &mut fmt::Formatter<'_>, print_fmt: PrintFmt) -> fmt:: return false; } - let mut hit = false; - backtrace_rs::resolve_frame_unsynchronized(frame, |symbol| { - hit = true; - - // `__rust_end_short_backtrace` means we are done hiding symbols - // for now. Print until we see `__rust_begin_short_backtrace`. 
- if print_fmt == PrintFmt::Short { - if let Some(sym) = symbol.name().and_then(|s| s.as_str()) { - if sym.contains("__rust_end_short_backtrace") { - print = true; - return; - } - if print && sym.contains("__rust_begin_short_backtrace") { - print = false; - return; - } - if !print { - omitted_count += 1; + if cfg!(feature = "backtrace-trace-only") { + const HEX_WIDTH: usize = 2 + 2 * size_of::(); + let frame_ip = frame.ip(); + res = writeln!(bt_fmt.formatter(), "{idx:4}: {frame_ip:HEX_WIDTH$?}"); + } else { + let mut hit = false; + backtrace_rs::resolve_frame_unsynchronized(frame, |symbol| { + hit = true; + + // `__rust_end_short_backtrace` means we are done hiding symbols + // for now. Print until we see `__rust_begin_short_backtrace`. + if print_fmt == PrintFmt::Short { + if let Some(sym) = symbol.name().and_then(|s| s.as_str()) { + if sym.contains("__rust_end_short_backtrace") { + print = true; + return; + } + if print && sym.contains("__rust_begin_short_backtrace") { + print = false; + return; + } + if !print { + omitted_count += 1; + } } } - } - if print { - if omitted_count > 0 { - debug_assert!(print_fmt == PrintFmt::Short); - // only print the message between the middle of frames - if !first_omit { - let _ = writeln!( - bt_fmt.formatter(), - " [... omitted {} frame{} ...]", - omitted_count, - if omitted_count > 1 { "s" } else { "" } - ); + if print { + if omitted_count > 0 { + debug_assert!(print_fmt == PrintFmt::Short); + // only print the message between the middle of frames + if !first_omit { + let _ = writeln!( + bt_fmt.formatter(), + " [... omitted {} frame{} ...]", + omitted_count, + if omitted_count > 1 { "s" } else { "" } + ); + } + first_omit = false; + omitted_count = 0; } - first_omit = false; - omitted_count = 0; + res = bt_fmt.frame().symbol(frame, symbol); } - res = bt_fmt.frame().symbol(frame, symbol); + }); + #[cfg(target_os = "nto")] + if libc::__my_thread_exit as *mut libc::c_void == frame.ip() { + if !hit && print { + use crate::backtrace_rs::SymbolName; + res = bt_fmt.frame().print_raw( + frame.ip(), + Some(SymbolName::new("__my_thread_exit".as_bytes())), + None, + None, + ); + } + return false; } - }); - #[cfg(target_os = "nto")] - if libc::__my_thread_exit as *mut libc::c_void == frame.ip() { if !hit && print { - use crate::backtrace_rs::SymbolName; - res = bt_fmt.frame().print_raw( - frame.ip(), - Some(SymbolName::new("__my_thread_exit".as_bytes())), - None, - None, - ); + res = bt_fmt.frame().print_raw(frame.ip(), None, None, None); } - return false; - } - if !hit && print { - res = bt_fmt.frame().print_raw(frame.ip(), None, None, None); } idx += 1; diff --git a/library/std/src/sys/net/connection/socket/windows.rs b/library/std/src/sys/net/connection/socket/windows.rs index ce975bb2289c2..b71d8b1357b5a 100644 --- a/library/std/src/sys/net/connection/socket/windows.rs +++ b/library/std/src/sys/net/connection/socket/windows.rs @@ -8,7 +8,8 @@ use crate::net::{Shutdown, SocketAddr}; use crate::os::windows::io::{ AsRawSocket, AsSocket, BorrowedSocket, FromRawSocket, IntoRawSocket, OwnedSocket, RawSocket, }; -use crate::sync::OnceLock; +use crate::sync::atomic::Atomic; +use crate::sync::atomic::Ordering::{AcqRel, Relaxed}; use crate::sys::c; use crate::sys_common::{AsInner, FromInner, IntoInner}; use crate::time::Duration; @@ -114,33 +115,38 @@ pub(super) mod netc { #[expect(missing_debug_implementations)] pub struct Socket(OwnedSocket); -static WSA_CLEANUP: OnceLock i32> = OnceLock::new(); +static WSA_INITIALIZED: Atomic = Atomic::::new(false); /// Checks 
whether the Windows socket interface has been started already, and /// if not, starts it. +#[inline] pub fn init() { - let _ = WSA_CLEANUP.get_or_init(|| unsafe { + if !WSA_INITIALIZED.load(Relaxed) { + wsa_startup(); + } +} + +#[cold] +fn wsa_startup() { + unsafe { let mut data: c::WSADATA = mem::zeroed(); let ret = c::WSAStartup( 0x202, // version 2.2 &mut data, ); assert_eq!(ret, 0); - - // Only register `WSACleanup` if `WSAStartup` is actually ever called. - // Workaround to prevent linking to `WS2_32.dll` when no network functionality is used. - // See issue #85441. - c::WSACleanup - }); + if WSA_INITIALIZED.swap(true, AcqRel) { + // If another thread raced with us and called WSAStartup first then call + // WSACleanup so it's as though WSAStartup was only called once. + c::WSACleanup(); + } + } } pub fn cleanup() { - // only perform cleanup if network functionality was actually initialized - if let Some(cleanup) = WSA_CLEANUP.get() { - unsafe { - cleanup(); - } - } + // We don't need to call WSACleanup here because exiting the process will cause + // the OS to clean everything for us, which is faster than doing it manually. + // See #141799. } /// Returns the last error from the Windows socket interface. diff --git a/library/std/src/sys/pal/sgx/abi/usercalls/mod.rs b/library/std/src/sys/pal/sgx/abi/usercalls/mod.rs index cbdaf439b2847..dea44124f458b 100644 --- a/library/std/src/sys/pal/sgx/abi/usercalls/mod.rs +++ b/library/std/src/sys/pal/sgx/abi/usercalls/mod.rs @@ -2,7 +2,7 @@ use crate::cmp; use crate::io::{ BorrowedCursor, Error as IoError, ErrorKind, IoSlice, IoSliceMut, Result as IoResult, }; -use crate::random::{DefaultRandomSource, Random}; +use crate::random::random; use crate::time::{Duration, Instant}; pub(crate) mod alloc; @@ -179,7 +179,7 @@ pub fn wait(event_mask: u64, mut timeout: u64) -> IoResult { // trusted to ensure accurate timeouts. if let Ok(timeout_signed) = i64::try_from(timeout) { let tenth = timeout_signed / 10; - let deviation = i64::random(&mut DefaultRandomSource).checked_rem(tenth).unwrap_or(0); + let deviation = random::(..).checked_rem(tenth).unwrap_or(0); timeout = timeout_signed.saturating_add(deviation) as _; } } diff --git a/library/std/src/sys/pal/windows/c.rs b/library/std/src/sys/pal/windows/c.rs index eee169d410a9c..edac5262a4e9a 100644 --- a/library/std/src/sys/pal/windows/c.rs +++ b/library/std/src/sys/pal/windows/c.rs @@ -197,7 +197,7 @@ compat_fn_optional! { pub fn WakeByAddressSingle(address: *const c_void); } -#[cfg(any(target_vendor = "win7", target_vendor = "uwp"))] +#[cfg(any(target_vendor = "win7"))] compat_fn_with_fallback! { pub static NTDLL: &CStr = c"ntdll"; @@ -228,65 +228,14 @@ compat_fn_with_fallback! { ) -> NTSTATUS { panic!("keyed events not available") } +} - // These functions are available on UWP when lazily loaded. They will fail WACK if loaded statically. 
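// Aside: a platform-neutral sketch (not part of the patch) of the racy one-time
// initialization pattern used for WSAStartup above. `acquire_resource` and
// `release_resource` are made-up stand-ins for WSAStartup/WSACleanup.
use std::sync::atomic::{AtomicBool, Ordering::{AcqRel, Relaxed}};

static INITIALIZED: AtomicBool = AtomicBool::new(false);

fn acquire_resource() { /* e.g. WSAStartup */ }
fn release_resource() { /* e.g. WSACleanup */ }

#[inline]
fn init() {
    if !INITIALIZED.load(Relaxed) {
        init_slow();
    }
}

#[cold]
fn init_slow() {
    acquire_resource();
    if INITIALIZED.swap(true, AcqRel) {
        // Another thread won the race and already initialized; undo our extra
        // acquisition so the net effect is a single initialization.
        release_resource();
    }
}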
- #[cfg(target_vendor = "uwp")] - pub fn NtCreateFile( - filehandle: *mut HANDLE, - desiredaccess: FILE_ACCESS_RIGHTS, - objectattributes: *const OBJECT_ATTRIBUTES, - iostatusblock: *mut IO_STATUS_BLOCK, - allocationsize: *const i64, - fileattributes: FILE_FLAGS_AND_ATTRIBUTES, - shareaccess: FILE_SHARE_MODE, - createdisposition: NTCREATEFILE_CREATE_DISPOSITION, - createoptions: NTCREATEFILE_CREATE_OPTIONS, - eabuffer: *const c_void, - ealength: u32 - ) -> NTSTATUS { - STATUS_NOT_IMPLEMENTED - } - #[cfg(target_vendor = "uwp")] - pub fn NtOpenFile( - filehandle: *mut HANDLE, - desiredaccess: u32, - objectattributes: *const OBJECT_ATTRIBUTES, - iostatusblock: *mut IO_STATUS_BLOCK, - shareaccess: u32, - openoptions: u32 - ) -> NTSTATUS { - STATUS_NOT_IMPLEMENTED - } - #[cfg(target_vendor = "uwp")] - pub fn NtReadFile( - filehandle: HANDLE, - event: HANDLE, - apcroutine: PIO_APC_ROUTINE, - apccontext: *const c_void, - iostatusblock: *mut IO_STATUS_BLOCK, - buffer: *mut c_void, - length: u32, - byteoffset: *const i64, - key: *const u32 - ) -> NTSTATUS { - STATUS_NOT_IMPLEMENTED - } - #[cfg(target_vendor = "uwp")] - pub fn NtWriteFile( - filehandle: HANDLE, - event: HANDLE, - apcroutine: PIO_APC_ROUTINE, - apccontext: *const c_void, - iostatusblock: *mut IO_STATUS_BLOCK, - buffer: *const c_void, - length: u32, - byteoffset: *const i64, - key: *const u32 - ) -> NTSTATUS { - STATUS_NOT_IMPLEMENTED - } - #[cfg(target_vendor = "uwp")] - pub fn RtlNtStatusToDosError(Status: NTSTATUS) -> u32 { - Status as u32 +cfg_if::cfg_if! { + if #[cfg(target_vendor = "uwp")] { + windows_targets::link_raw_dylib!("ntdll.dll" "system" fn NtCreateFile(filehandle : *mut HANDLE, desiredaccess : FILE_ACCESS_RIGHTS, objectattributes : *const OBJECT_ATTRIBUTES, iostatusblock : *mut IO_STATUS_BLOCK, allocationsize : *const i64, fileattributes : FILE_FLAGS_AND_ATTRIBUTES, shareaccess : FILE_SHARE_MODE, createdisposition : NTCREATEFILE_CREATE_DISPOSITION, createoptions : NTCREATEFILE_CREATE_OPTIONS, eabuffer : *const core::ffi::c_void, ealength : u32) -> NTSTATUS); + windows_targets::link_raw_dylib!("ntdll.dll" "system" fn NtOpenFile(filehandle : *mut HANDLE, desiredaccess : u32, objectattributes : *const OBJECT_ATTRIBUTES, iostatusblock : *mut IO_STATUS_BLOCK, shareaccess : u32, openoptions : u32) -> NTSTATUS); + windows_targets::link_raw_dylib!("ntdll.dll" "system" fn NtReadFile(filehandle : HANDLE, event : HANDLE, apcroutine : PIO_APC_ROUTINE, apccontext : *const core::ffi::c_void, iostatusblock : *mut IO_STATUS_BLOCK, buffer : *mut core::ffi::c_void, length : u32, byteoffset : *const i64, key : *const u32) -> NTSTATUS); + windows_targets::link_raw_dylib!("ntdll.dll" "system" fn NtWriteFile(filehandle : HANDLE, event : HANDLE, apcroutine : PIO_APC_ROUTINE, apccontext : *const core::ffi::c_void, iostatusblock : *mut IO_STATUS_BLOCK, buffer : *const core::ffi::c_void, length : u32, byteoffset : *const i64, key : *const u32) -> NTSTATUS); + windows_targets::link_raw_dylib!("ntdll.dll" "system" fn RtlNtStatusToDosError(status : NTSTATUS) -> u32); } } diff --git a/library/std/src/sys/sync/once/futex.rs b/library/std/src/sys/sync/once/futex.rs index 539f0fe89eaaa..407fdcebcf5cc 100644 --- a/library/std/src/sys/sync/once/futex.rs +++ b/library/std/src/sys/sync/once/futex.rs @@ -8,16 +8,18 @@ use crate::sys::futex::{Futex, Primitive, futex_wait, futex_wake_all}; // This means we only need one atomic value with 4 states: /// No initialization has run yet, and no thread is currently using the Once. 
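// Aside: a small sketch (not part of the patch) of why the `Once` state constants
// below are reordered so that COMPLETE becomes the all-zero state: the common fast
// path only has to test whether the masked state bits are zero, which can be a bit
// cheaper on some targets than comparing against a nonzero constant.
const STATE_MASK: usize = 0b11;
const COMPLETE_STATE: usize = 0b00;

#[inline]
fn is_completed(state_and_queue: usize) -> bool {
    // With COMPLETE == 0 this is just "are the low two bits clear?".
    state_and_queue & STATE_MASK == COMPLETE_STATE
}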
-const INCOMPLETE: Primitive = 0; +const INCOMPLETE: Primitive = 3; /// Some thread has previously attempted to initialize the Once, but it panicked, /// so the Once is now poisoned. There are no other threads currently accessing /// this Once. -const POISONED: Primitive = 1; +const POISONED: Primitive = 2; /// Some thread is currently attempting to run initialization. It may succeed, /// so all future threads need to wait for it to finish. -const RUNNING: Primitive = 2; +const RUNNING: Primitive = 1; /// Initialization has completed and all future calls should finish immediately. -const COMPLETE: Primitive = 3; +/// By choosing this state as the all-zero state the `is_completed` check can be +/// a bit faster on some platforms. +const COMPLETE: Primitive = 0; // An additional bit indicates whether there are waiting threads: diff --git a/library/std/src/sys/sync/once/queue.rs b/library/std/src/sys/sync/once/queue.rs index 6a2ab0dcf1b33..49e15d65f25a2 100644 --- a/library/std/src/sys/sync/once/queue.rs +++ b/library/std/src/sys/sync/once/queue.rs @@ -74,11 +74,12 @@ pub struct OnceState { } // Four states that a Once can be in, encoded into the lower bits of -// `state_and_queue` in the Once structure. -const INCOMPLETE: usize = 0x0; -const POISONED: usize = 0x1; -const RUNNING: usize = 0x2; -const COMPLETE: usize = 0x3; +// `state_and_queue` in the Once structure. By choosing COMPLETE as the all-zero +// state the `is_completed` check can be a bit faster on some platforms. +const INCOMPLETE: usize = 0x3; +const POISONED: usize = 0x2; +const RUNNING: usize = 0x1; +const COMPLETE: usize = 0x0; // Mask to learn about the state. All other bits are the queue of waiters if // this is in the RUNNING state. diff --git a/library/std/src/thread/local.rs b/library/std/src/thread/local.rs index 7cd448733130d..0ad014ccd3e2e 100644 --- a/library/std/src/thread/local.rs +++ b/library/std/src/thread/local.rs @@ -469,6 +469,29 @@ impl LocalKey> { pub fn replace(&'static self, value: T) -> T { self.with(|cell| cell.replace(value)) } + + /// Updates the contained value using a function. + /// + /// # Examples + /// + /// ``` + /// #![feature(local_key_cell_update)] + /// use std::cell::Cell; + /// + /// thread_local! { + /// static X: Cell = const { Cell::new(5) }; + /// } + /// + /// X.update(|x| x + 1); + /// assert_eq!(X.get(), 6); + /// ``` + #[unstable(feature = "local_key_cell_update", issue = "143989")] + pub fn update(&'static self, f: impl FnOnce(T) -> T) + where + T: Copy, + { + self.with(|cell| cell.update(f)) + } } impl LocalKey> { diff --git a/library/sysroot/Cargo.toml b/library/sysroot/Cargo.toml index 290c2eeed44c6..032f5272a9cd4 100644 --- a/library/sysroot/Cargo.toml +++ b/library/sysroot/Cargo.toml @@ -20,6 +20,7 @@ test = { path = "../test", public = true } [features] default = ["std_detect_file_io", "std_detect_dlsym_getauxval", "panic-unwind"] backtrace = ["std/backtrace"] +backtrace-trace-only = ["std/backtrace-trace-only"] compiler-builtins-c = ["std/compiler-builtins-c"] compiler-builtins-mem = ["std/compiler-builtins-mem"] compiler-builtins-no-asm = ["std/compiler-builtins-no-asm"] diff --git a/library/windows_targets/src/lib.rs b/library/windows_targets/src/lib.rs index bce54c5ffcef0..9e82e6a720006 100644 --- a/library/windows_targets/src/lib.rs +++ b/library/windows_targets/src/lib.rs @@ -7,8 +7,7 @@ #![feature(decl_macro)] #![feature(no_core)] -#[cfg(feature = "windows_raw_dylib")] -pub macro link { +pub macro link_raw_dylib { ($library:literal $abi:literal $($link_name:literal)? 
$(#[$doc:meta])? fn $($function:tt)*) => ( #[cfg_attr(not(target_arch = "x86"), link(name = $library, kind = "raw-dylib", modifiers = "+verbatim"))] #[cfg_attr(target_arch = "x86", link(name = $library, kind = "raw-dylib", modifiers = "+verbatim", import_name_type = "undecorated"))] @@ -18,6 +17,26 @@ pub macro link { } ) } + +pub macro link_dylib { + ($library:literal $abi:literal $($link_name:literal)? $(#[$doc:meta])? fn $($function:tt)*) => ( + // Note: the windows-targets crate uses a pre-built Windows.lib import library which we don't + // have in this repo. So instead we always link kernel32.lib and add the rest of the import + // libraries below by using an empty extern block. This works because extern blocks are not + // connected to the library given in the #[link] attribute. + #[link(name = "kernel32")] + unsafe extern $abi { + $(#[link_name=$link_name])? + pub fn $($function)*; + } + ) +} + +#[cfg(feature = "windows_raw_dylib")] +pub macro link($($tt:tt)*) { + $crate::link_raw_dylib!($($tt)*) +} + #[cfg(not(feature = "windows_raw_dylib"))] pub macro link { ($library:literal $abi:literal $($link_name:literal)? $(#[$doc:meta])? fn $($function:tt)*) => ( diff --git a/rust-toolchain.toml b/rust-toolchain.toml index fed6a6a6c8410..82b33738ad241 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -2,5 +2,5 @@ # standard library we currently track. [toolchain] -channel = "nightly-2025-07-14" +channel = "nightly-2025-07-21" components = ["llvm-tools-preview", "rustc-dev", "rust-src", "rustfmt"] diff --git a/tool_config/kani-version.toml b/tool_config/kani-version.toml index a3cb1b760376d..8ad1d860155b6 100644 --- a/tool_config/kani-version.toml +++ b/tool_config/kani-version.toml @@ -2,4 +2,4 @@ # incompatible with the verify-std repo. [kani] -commit = "5af25b54e695e0480887ab2e978cd4dd05b7154c" +commit = "c1648416995a15b6ab33e44829e96c9a65541927" From 177d0fd25fbf3cb82c7b8408140158af1e408ede Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 30 Jul 2025 13:32:11 +0000 Subject: [PATCH 10/20] Merge subtree update for toolchain nightly-2025-07-30 (#432) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is an automated PR to merge library subtree updates from 2025-07-21 (rust-lang/rust@9982d6462bedf1e793f7b2dbd655a4e57cdf67d4) to 2025-07-30 (rust-lang/rust@ba7e63b63871a429533c189adbfb1d9a6337e000) (inclusive) into main. `git merge` resulted in conflicts, which require manual resolution. Files were commited with merge conflict markers. 
**Do not remove or edit the following annotations:** git-subtree-dir: library git-subtree-split: 2f8dad2e060ced429ebff5b6d464b3dea36b4e05 --------- Signed-off-by: xizheyin Signed-off-by: Ayush Singh Co-authored-by: Marijn Schouten Co-authored-by: Roger Curley Co-authored-by: Ralf Jung Co-authored-by: Josh Triplett Co-authored-by: Matthias Krüger <476013+matthiaskrgr@users.noreply.github.com> Co-authored-by: Folkert de Vries Co-authored-by: okaneco <47607823+okaneco@users.noreply.github.com> Co-authored-by: Folkert de Vries Co-authored-by: Sayantan Chakraborty <142906350+sayantn@users.noreply.github.com> Co-authored-by: nazo6 Co-authored-by: bors Co-authored-by: Jakub Beránek Co-authored-by: Orson Peters Co-authored-by: León Orell Valerian Liehr Co-authored-by: xizheyin Co-authored-by: Deadbeef Co-authored-by: René Kijewski Co-authored-by: Nik Revenco Co-authored-by: The Miri Cronjob Bot Co-authored-by: Tim (Theemathas) Chirananthavat Co-authored-by: Chris Denton Co-authored-by: Samuel Tardieu Co-authored-by: Travis Cross Co-authored-by: Amanieu d'Antras Co-authored-by: Oli Scherer Co-authored-by: David Mládek Co-authored-by: SunkenPotato <3.14radius02@gmail.com> Co-authored-by: Cameron Steffen Co-authored-by: Martin Ombura Jr <8682597+martinomburajr@users.noreply.github.com> Co-authored-by: Madhav Madhusoodanan Co-authored-by: sayantn Co-authored-by: Trevor Gross Co-authored-by: WANG Rui Co-authored-by: Luigi Sartor Piucco Co-authored-by: Julien THILLARD <54775010+supersurviveur@users.noreply.github.com> Co-authored-by: The rustc-josh-sync Cronjob Bot Co-authored-by: usamoi Co-authored-by: Rémy Rakic Co-authored-by: Nurzhan Sakén Co-authored-by: ltdk Co-authored-by: bjorn3 <17426603+bjorn3@users.noreply.github.com> Co-authored-by: Guillaume Gomez Co-authored-by: Evgenii Zheltonozhskii Co-authored-by: roblabla Co-authored-by: 许杰友 Jieyou Xu (Joe) <39484203+jieyouxu@users.noreply.github.com> Co-authored-by: Ayush Singh Co-authored-by: Kornel Co-authored-by: Jonas Platte Co-authored-by: Alex Crichton Co-authored-by: Zachary S Co-authored-by: Jeremy Smart Co-authored-by: Ivan Tadeu Ferreira Antunes Filho Co-authored-by: Alisa Sireneva Co-authored-by: Trevor Gross Co-authored-by: Jacob Pratt Co-authored-by: Scott McMurray Co-authored-by: xonx <119700621+xonx4l@users.noreply.github.com> Co-authored-by: Yosh Co-authored-by: joboet Co-authored-by: Stuart Cook Co-authored-by: Connor Tsui Co-authored-by: Aandreba Co-authored-by: Lucas Werkmeister Co-authored-by: gitbot Co-authored-by: Michael Tautschnig --- library/Cargo.lock | 17 +- library/alloc/Cargo.toml | 2 - .../alloc/src/collections/vec_deque/drain.rs | 2 +- library/alloc/src/lib.rs | 1 - library/alloc/src/raw_vec/mod.rs | 2 +- library/alloc/src/slice.rs | 4 - library/alloc/src/string.rs | 24 +- .../.github/workflows/rustc-pull.yml | 23 + library/compiler-builtins/CONTRIBUTING.md | 9 + library/compiler-builtins/Cargo.toml | 1 - .../builtins-test/Cargo.toml | 4 +- library/compiler-builtins/ci/bench-icount.sh | 2 +- .../compiler-builtins/src/mem/impls.rs | 5 +- .../compiler-builtins/src/mem/mod.rs | 13 +- .../crates/josh-sync/Cargo.toml | 8 - .../crates/josh-sync/src/main.rs | 45 - .../crates/josh-sync/src/sync.rs | 401 --- .../crates/libm-macros/Cargo.toml | 2 +- .../crates/musl-math-sys/Cargo.toml | 2 +- library/compiler-builtins/josh-sync.toml | 3 + .../compiler-builtins/libm-test/Cargo.toml | 4 +- .../libm/src/math/support/float_traits.rs | 2 + library/compiler-builtins/rust-version | 2 +- library/compiler-builtins/triagebot.toml | 5 +- 
library/core/src/array/mod.rs | 3 +- library/core/src/ascii/ascii_char.rs | 3 +- library/core/src/borrow.rs | 6 +- library/core/src/char/convert.rs | 15 +- library/core/src/clone.rs | 2 +- library/core/src/convert/mod.rs | 40 +- library/core/src/convert/num.rs | 24 +- library/core/src/iter/adapters/rev.rs | 19 + library/core/src/macros/mod.rs | 2 + library/core/src/mem/drop_guard.rs | 155 + library/core/src/mem/mod.rs | 6 +- library/core/src/net/ip_addr.rs | 36 +- library/core/src/net/socket_addr.rs | 9 +- library/core/src/num/error.rs | 6 +- library/core/src/num/flt2dec/mod.rs | 18 +- library/core/src/num/mod.rs | 3 +- library/core/src/num/nonzero.rs | 3 +- library/core/src/ops/deref.rs | 6 +- library/core/src/ops/range.rs | 30 + library/core/src/ops/try_trait.rs | 8 +- library/core/src/option.rs | 30 +- library/core/src/ptr/alignment.rs | 6 +- library/core/src/ptr/mod.rs | 6 +- library/core/src/ptr/unique.rs | 2 - library/core/src/range.rs | 18 + library/core/src/result.rs | 26 +- library/core/src/slice/iter.rs | 249 -- library/core/src/slice/mod.rs | 78 +- library/core/src/slice/sort/select.rs | 3 +- library/core/src/slice/sort/shared/pivot.rs | 10 +- .../core/src/slice/sort/stable/quicksort.rs | 4 - .../core/src/slice/sort/unstable/quicksort.rs | 3 +- library/core/src/str/iter.rs | 2 +- library/core/src/str/mod.rs | 40 +- library/core/src/str/pattern.rs | 19 +- library/core/src/str/traits.rs | 2 + library/core/src/sync/atomic.rs | 36 +- library/coretests/tests/lib.rs | 3 +- library/coretests/tests/macros.rs | 6 + library/coretests/tests/mem.rs | 46 + library/coretests/tests/num/dec2flt/float.rs | 11 +- .../coretests/tests/num/flt2dec/estimator.rs | 4 +- library/coretests/tests/num/flt2dec/mod.rs | 34 +- library/coretests/tests/num/mod.rs | 21 + library/coretests/tests/slice.rs | 184 -- library/rustc-std-workspace-alloc/Cargo.toml | 3 + library/rustc-std-workspace-core/Cargo.toml | 3 + library/rustc-std-workspace-std/Cargo.toml | 3 + library/std/Cargo.toml | 13 +- library/std/src/env.rs | 2 +- library/std/src/lib.rs | 2 +- library/std/src/sync/mod.rs | 2 + library/std/src/sync/nonpoison.rs | 37 + library/std/src/sync/nonpoison/mutex.rs | 611 ++++ library/std/src/sync/poison.rs | 6 +- library/std/src/sync/poison/condvar.rs | 2 +- library/std/src/sync/poison/mutex.rs | 2 +- .../std/src/sys/net/connection/uefi/mod.rs | 32 +- .../std/src/sys/net/connection/uefi/tcp.rs | 42 + .../std/src/sys/net/connection/uefi/tcp4.rs | 18 + library/std/src/sys/pal/hermit/thread.rs | 6 +- library/std/src/sys/pal/itron/thread.rs | 6 +- .../std/src/sys/pal/sgx/abi/usercalls/mod.rs | 2 +- library/std/src/sys/pal/sgx/thread.rs | 6 +- library/std/src/sys/pal/teeos/thread.rs | 6 +- library/std/src/sys/pal/uefi/helpers.rs | 4 + library/std/src/sys/pal/uefi/thread.rs | 6 +- library/std/src/sys/pal/unix/os.rs | 5 +- .../std/src/sys/pal/unix/stack_overflow.rs | 23 +- library/std/src/sys/pal/unix/thread.rs | 24 +- library/std/src/sys/pal/unsupported/thread.rs | 6 +- library/std/src/sys/pal/wasi/thread.rs | 4 +- .../std/src/sys/pal/wasm/atomics/thread.rs | 6 +- library/std/src/sys/pal/windows/thread.rs | 6 +- library/std/src/sys/pal/xous/thread.rs | 6 +- library/std/src/sys/random/sgx.rs | 14 +- library/std/src/sys/random/uefi.rs | 5 +- .../std/src/sys/thread_local/guard/windows.rs | 17 +- library/std/src/thread/mod.rs | 10 +- library/std/tests/sync/lib.rs | 55 + library/std/tests/sync/mutex.rs | 525 +-- .../crates => }/std_detect/Cargo.toml | 12 +- .../{stdarch/crates => }/std_detect/README.md | 2 +- 
.../std_detect/src/detect/arch/aarch64.rs | 0 .../std_detect/src/detect/arch/arm.rs | 0 .../std_detect/src/detect/arch/loongarch.rs | 2 +- .../std_detect/src/detect/arch/mips.rs | 0 .../std_detect/src/detect/arch/mips64.rs | 0 .../std_detect/src/detect/arch/mod.rs | 2 +- .../std_detect/src/detect/arch/powerpc.rs | 0 .../std_detect/src/detect/arch/powerpc64.rs | 0 .../std_detect/src/detect/arch/riscv.rs | 0 .../std_detect/src/detect/arch/s390x.rs | 0 .../std_detect/src/detect/arch/x86.rs | 0 .../crates => }/std_detect/src/detect/bit.rs | 0 .../std_detect/src/detect/cache.rs | 38 +- .../std_detect/src/detect/macros.rs | 5 +- .../crates => }/std_detect/src/detect/mod.rs | 2 +- .../std_detect/src/detect/os/aarch64.rs | 8 +- .../src/detect/os/darwin/aarch64.rs | 11 +- .../src/detect/os/freebsd/aarch64.rs | 0 .../std_detect/src/detect/os/freebsd/arm.rs | 0 .../src/detect/os/freebsd/auxvec.rs | 7 +- .../std_detect/src/detect/os/freebsd/mod.rs | 0 .../src/detect/os/freebsd/powerpc.rs | 0 .../std_detect/src/detect/os/linux/aarch64.rs | 92 +- .../src/detect/os/linux/aarch64/tests.rs | 77 + .../std_detect/src/detect/os/linux/arm.rs | 0 .../std_detect/src/detect/os/linux/auxvec.rs | 133 +- .../src/detect/os/linux/auxvec/tests.rs | 109 + .../src/detect/os/linux/loongarch.rs | 15 +- .../std_detect/src/detect/os/linux/mips.rs | 0 .../std_detect/src/detect/os/linux/mod.rs | 12 +- .../std_detect/src/detect/os/linux/powerpc.rs | 0 .../std_detect/src/detect/os/linux/riscv.rs | 11 +- .../std_detect/src/detect/os/linux/s390x.rs | 0 .../src/detect/os/openbsd/aarch64.rs | 4 +- .../std_detect/src/detect/os/other.rs | 0 .../std_detect/src/detect/os/riscv.rs | 68 +- .../std_detect/src/detect/os/riscv/tests.rs | 64 + .../src/detect/os/windows/aarch64.rs | 0 .../std_detect/src/detect/os/x86.rs | 39 +- .../test_data/linux-artificial-aarch64.auxv | Bin .../test_data/linux-empty-hwcap2-aarch64.auxv | Bin .../test_data/linux-hwcap2-aarch64.auxv | Bin .../test_data/linux-no-hwcap2-aarch64.auxv | Bin .../src/detect/test_data/linux-rpi3.auxv | Bin .../macos-virtualbox-linux-x86-4850HQ.auxv | Bin .../crates => }/std_detect/src/lib.rs | 0 .../std_detect/tests/cpu-detection.rs | 60 +- .../std_detect/tests/macro_trailing_commas.rs | 8 +- .../std_detect/tests/x86-specific.rs | 42 +- library/stdarch/.github/workflows/main.yml | 22 +- .../stdarch/.github/workflows/rustc-pull.yml | 22 + library/stdarch/Cargo.lock | 95 +- library/stdarch/Cargo.toml | 3 +- library/stdarch/README.md | 10 +- library/stdarch/ci/build-std-detect.sh | 46 - .../aarch64-unknown-linux-gnu/Dockerfile | 4 +- .../aarch64_be-unknown-linux-gnu/Dockerfile | 8 +- .../arm-unknown-linux-gnueabihf/Dockerfile | 2 +- .../armv7-unknown-linux-gnueabihf/Dockerfile | 2 +- .../docker/i586-unknown-linux-gnu/Dockerfile | 2 +- .../docker/i686-unknown-linux-gnu/Dockerfile | 2 +- .../loongarch64-unknown-linux-gnu/Dockerfile | 4 +- .../docker/mips-unknown-linux-gnu/Dockerfile | 2 +- .../mips64-unknown-linux-gnuabi64/Dockerfile | 2 +- .../Dockerfile | 2 +- .../mipsel-unknown-linux-musl/Dockerfile | 2 +- .../ci/docker/nvptx64-nvidia-cuda/Dockerfile | 2 +- .../powerpc-unknown-linux-gnu/Dockerfile | 2 +- .../powerpc64-unknown-linux-gnu/Dockerfile | 2 +- .../powerpc64le-unknown-linux-gnu/Dockerfile | 2 +- .../riscv32gc-unknown-linux-gnu/Dockerfile | 4 +- .../riscv64gc-unknown-linux-gnu/Dockerfile | 2 +- .../docker/s390x-unknown-linux-gnu/Dockerfile | 2 +- .../ci/docker/wasm32-wasip1/Dockerfile | 8 +- .../x86_64-unknown-linux-gnu/Dockerfile | 4 +- 
.../docker/x86_64-unknown-linux-gnu/cpuid.def | 36 +- library/stdarch/ci/dox.sh | 3 - library/stdarch/ci/run-docker.sh | 1 - library/stdarch/ci/run.sh | 16 +- .../core_arch/src/aarch64/neon/generated.rs | 596 +--- .../src/arm_shared/neon/generated.rs | 953 +++--- .../crates/core_arch/src/core_arch_docs.md | 2 + .../crates/core_arch/src/loongarch32/mod.rs | 47 + .../src/loongarch64/lasx/generated.rs | 2912 ++++++++--------- .../src/loongarch64/lsx/generated.rs | 2832 ++++++++-------- .../crates/core_arch/src/loongarch64/mod.rs | 254 +- .../core_arch/src/loongarch_shared/mod.rs | 242 ++ library/stdarch/crates/core_arch/src/mod.rs | 21 +- .../crates/core_arch/src/s390x/vector.rs | 25 +- .../crates/core_arch/src/wasm32/mod.rs | 10 + .../crates/intrinsic-test/src/arm/compile.rs | 83 +- .../crates/intrinsic-test/src/arm/config.rs | 1 - .../intrinsic-test/src/arm/intrinsic.rs | 15 +- .../intrinsic-test/src/arm/json_parser.rs | 2 +- .../crates/intrinsic-test/src/arm/mod.rs | 94 +- .../crates/intrinsic-test/src/arm/types.rs | 27 +- .../intrinsic-test/src/common/argument.rs | 32 +- .../crates/intrinsic-test/src/common/cli.rs | 6 +- .../intrinsic-test/src/common/compare.rs | 34 +- .../intrinsic-test/src/common/compile_c.rs | 137 +- .../crates/intrinsic-test/src/common/gen_c.rs | 323 +- .../intrinsic-test/src/common/gen_rust.rs | 99 +- .../src/common/intrinsic_helpers.rs | 88 +- .../intrinsic-test/src/common/write_file.rs | 33 - .../stdarch/crates/intrinsic-test/src/main.rs | 3 + .../stdarch/crates/simd-test-macro/src/lib.rs | 4 +- .../stdarch/crates/std_detect/LICENSE-APACHE | 201 -- library/stdarch/crates/std_detect/LICENSE-MIT | 25 - .../stdarch/crates/stdarch-gen-arm/Cargo.toml | 1 - .../spec/neon/aarch64.spec.yml | 194 +- .../spec/neon/arm_shared.spec.yml | 204 +- .../crates/stdarch-gen-arm/src/expression.rs | 8 +- .../stdarch-gen-arm/src/load_store_tests.rs | 20 +- .../crates/stdarch-gen-arm/src/typekinds.rs | 13 +- .../crates/stdarch-gen-arm/src/wildcards.rs | 9 +- .../crates/stdarch-gen-loongarch/README.md | 2 - .../crates/stdarch-gen-loongarch/src/main.rs | 46 +- .../stdarch/crates/stdarch-test/Cargo.toml | 3 +- .../stdarch/crates/stdarch-test/src/lib.rs | 39 +- library/stdarch/examples/Cargo.toml | 1 - library/stdarch/examples/connect5.rs | 49 +- library/stdarch/examples/hex.rs | 8 +- library/stdarch/triagebot.toml | 2 +- library/sysroot/Cargo.toml | 4 +- library/windows_targets/Cargo.toml | 5 + library/windows_targets/src/lib.rs | 16 +- rust-toolchain.toml | 2 +- tool_config/kani-version.toml | 2 +- 235 files changed, 6901 insertions(+), 7305 deletions(-) create mode 100644 library/compiler-builtins/.github/workflows/rustc-pull.yml delete mode 100644 library/compiler-builtins/crates/josh-sync/Cargo.toml delete mode 100644 library/compiler-builtins/crates/josh-sync/src/main.rs delete mode 100644 library/compiler-builtins/crates/josh-sync/src/sync.rs create mode 100644 library/compiler-builtins/josh-sync.toml create mode 100644 library/core/src/mem/drop_guard.rs create mode 100644 library/std/src/sync/nonpoison.rs create mode 100644 library/std/src/sync/nonpoison/mutex.rs rename library/{stdarch/crates => }/std_detect/Cargo.toml (76%) rename library/{stdarch/crates => }/std_detect/README.md (99%) rename library/{stdarch/crates => }/std_detect/src/detect/arch/aarch64.rs (100%) rename library/{stdarch/crates => }/std_detect/src/detect/arch/arm.rs (100%) rename library/{stdarch/crates => }/std_detect/src/detect/arch/loongarch.rs (96%) rename library/{stdarch/crates => 
}/std_detect/src/detect/arch/mips.rs (100%) rename library/{stdarch/crates => }/std_detect/src/detect/arch/mips64.rs (100%) rename library/{stdarch/crates => }/std_detect/src/detect/arch/mod.rs (96%) rename library/{stdarch/crates => }/std_detect/src/detect/arch/powerpc.rs (100%) rename library/{stdarch/crates => }/std_detect/src/detect/arch/powerpc64.rs (100%) rename library/{stdarch/crates => }/std_detect/src/detect/arch/riscv.rs (100%) rename library/{stdarch/crates => }/std_detect/src/detect/arch/s390x.rs (100%) rename library/{stdarch/crates => }/std_detect/src/detect/arch/x86.rs (100%) rename library/{stdarch/crates => }/std_detect/src/detect/bit.rs (100%) rename library/{stdarch/crates => }/std_detect/src/detect/cache.rs (88%) rename library/{stdarch/crates => }/std_detect/src/detect/macros.rs (99%) rename library/{stdarch/crates => }/std_detect/src/detect/mod.rs (99%) rename library/{stdarch/crates => }/std_detect/src/detect/os/aarch64.rs (97%) rename library/{stdarch/crates => }/std_detect/src/detect/os/darwin/aarch64.rs (97%) rename library/{stdarch/crates => }/std_detect/src/detect/os/freebsd/aarch64.rs (100%) rename library/{stdarch/crates => }/std_detect/src/detect/os/freebsd/arm.rs (100%) rename library/{stdarch/crates => }/std_detect/src/detect/os/freebsd/auxvec.rs (94%) rename library/{stdarch/crates => }/std_detect/src/detect/os/freebsd/mod.rs (100%) rename library/{stdarch/crates => }/std_detect/src/detect/os/freebsd/powerpc.rs (100%) rename library/{stdarch/crates => }/std_detect/src/detect/os/linux/aarch64.rs (84%) create mode 100644 library/std_detect/src/detect/os/linux/aarch64/tests.rs rename library/{stdarch/crates => }/std_detect/src/detect/os/linux/arm.rs (100%) rename library/{stdarch/crates => }/std_detect/src/detect/os/linux/auxvec.rs (66%) create mode 100644 library/std_detect/src/detect/os/linux/auxvec/tests.rs rename library/{stdarch/crates => }/std_detect/src/detect/os/linux/loongarch.rs (88%) rename library/{stdarch/crates => }/std_detect/src/detect/os/linux/mips.rs (100%) rename library/{stdarch/crates => }/std_detect/src/detect/os/linux/mod.rs (82%) rename library/{stdarch/crates => }/std_detect/src/detect/os/linux/powerpc.rs (100%) rename library/{stdarch/crates => }/std_detect/src/detect/os/linux/riscv.rs (98%) rename library/{stdarch/crates => }/std_detect/src/detect/os/linux/s390x.rs (100%) rename library/{stdarch/crates => }/std_detect/src/detect/os/openbsd/aarch64.rs (98%) rename library/{stdarch/crates => }/std_detect/src/detect/os/other.rs (100%) rename library/{stdarch/crates => }/std_detect/src/detect/os/riscv.rs (69%) create mode 100644 library/std_detect/src/detect/os/riscv/tests.rs rename library/{stdarch/crates => }/std_detect/src/detect/os/windows/aarch64.rs (100%) rename library/{stdarch/crates => }/std_detect/src/detect/os/x86.rs (94%) rename library/{stdarch/crates => }/std_detect/src/detect/test_data/linux-artificial-aarch64.auxv (100%) rename library/{stdarch/crates => }/std_detect/src/detect/test_data/linux-empty-hwcap2-aarch64.auxv (100%) rename library/{stdarch/crates => }/std_detect/src/detect/test_data/linux-hwcap2-aarch64.auxv (100%) rename library/{stdarch/crates => }/std_detect/src/detect/test_data/linux-no-hwcap2-aarch64.auxv (100%) rename library/{stdarch/crates => }/std_detect/src/detect/test_data/linux-rpi3.auxv (100%) rename library/{stdarch/crates => }/std_detect/src/detect/test_data/macos-virtualbox-linux-x86-4850HQ.auxv (100%) rename library/{stdarch/crates => }/std_detect/src/lib.rs (100%) rename 
library/{stdarch/crates => }/std_detect/tests/cpu-detection.rs (94%) rename library/{stdarch/crates => }/std_detect/tests/macro_trailing_commas.rs (92%) rename library/{stdarch/crates => }/std_detect/tests/x86-specific.rs (85%) create mode 100644 library/stdarch/.github/workflows/rustc-pull.yml delete mode 100755 library/stdarch/ci/build-std-detect.sh create mode 100644 library/stdarch/crates/core_arch/src/loongarch32/mod.rs create mode 100644 library/stdarch/crates/core_arch/src/loongarch_shared/mod.rs delete mode 100644 library/stdarch/crates/std_detect/LICENSE-APACHE delete mode 100644 library/stdarch/crates/std_detect/LICENSE-MIT diff --git a/library/Cargo.lock b/library/Cargo.lock index 8b5275e5065e6..a9a611fe1ed56 100644 --- a/library/Cargo.lock +++ b/library/Cargo.lock @@ -78,9 +78,9 @@ dependencies = [ [[package]] name = "dlmalloc" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d01597dde41c0b9da50d5f8c219023d63d8f27f39a27095070fd191fddc83891" +checksum = "fa3a2dbee57b69fbb5dbe852fa9c0925697fb0c7fbcb1593e90e5ffaedf13d51" dependencies = [ "cfg-if", "libc", @@ -90,11 +90,10 @@ dependencies = [ [[package]] name = "fortanix-sgx-abi" -version = "0.5.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57cafc2274c10fab234f176b25903ce17e690fca7597090d50880e047a0389c5" +checksum = "5efc85edd5b83e8394f4371dd0da6859dff63dd387dab8568fece6af4cde6f84" dependencies = [ - "compiler_builtins", "rustc-std-workspace-core", ] @@ -238,9 +237,9 @@ dependencies = [ [[package]] name = "rand" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fbfd9d094a40bf3ae768db9361049ace4c0e04a4fd6b359518bd7b73a73dd97" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ "rand_core", ] @@ -340,10 +339,10 @@ dependencies = [ name = "std_detect" version = "0.1.5" dependencies = [ + "alloc", "cfg-if", + "core", "libc", - "rustc-std-workspace-alloc", - "rustc-std-workspace-core", ] [[package]] diff --git a/library/alloc/Cargo.toml b/library/alloc/Cargo.toml index 68e83aae5a73e..23909330ecf30 100644 --- a/library/alloc/Cargo.toml +++ b/library/alloc/Cargo.toml @@ -22,9 +22,7 @@ safety = { path = "../contracts/safety" } [features] compiler-builtins-mem = ['compiler_builtins/mem'] compiler-builtins-c = ["compiler_builtins/c"] -compiler-builtins-no-asm = ["compiler_builtins/no-asm"] compiler-builtins-no-f16-f128 = ["compiler_builtins/no-f16-f128"] -compiler-builtins-mangled-names = ["compiler_builtins/mangled-names"] # Make panics and failed asserts immediately abort without formatting any message panic_immediate_abort = ["core/panic_immediate_abort"] # Choose algorithms that are optimized for binary size instead of runtime performance diff --git a/library/alloc/src/collections/vec_deque/drain.rs b/library/alloc/src/collections/vec_deque/drain.rs index 44fcef4ed7dc4..321621d18be9f 100644 --- a/library/alloc/src/collections/vec_deque/drain.rs +++ b/library/alloc/src/collections/vec_deque/drain.rs @@ -192,7 +192,7 @@ impl Drop for Drain<'_, T, A> { // this branch is never taken. // We use `#[cold]` instead of `#[inline(never)]`, because inlining this // function into the general case (`.drain(n..m)`) is fine. - // See `tests/codegen/vecdeque-drain.rs` for a test. + // See `tests/codegen-llvm/vecdeque-drain.rs` for a test. 
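// Aside: a minimal sketch (not part of the patch) of the `#[cold]` pattern the
// comment above refers to. The helper names are made up; the point is that `#[cold]`
// tells codegen the call is unlikely without forbidding inlining the way
// `#[inline(never)]` would.
#[cold]
fn drain_all_fallback(buf: &mut Vec<u8>) {
    buf.clear();
}

fn shrink(buf: &mut Vec<u8>, drop_everything: bool) {
    if drop_everything {
        // rarely-taken branch, kept out of the hot path
        drain_all_fallback(buf);
    } else {
        let half = buf.len() / 2;
        buf.truncate(half);
    }
}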
#[cold] fn join_head_and_tail_wrapping( source_deque: &mut VecDeque, diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs index 4bb82fb519cd0..449c3c21750bb 100644 --- a/library/alloc/src/lib.rs +++ b/library/alloc/src/lib.rs @@ -95,7 +95,6 @@ #![cfg_attr(kani, feature(kani))] #![feature(alloc_layout_extra)] #![feature(allocator_api)] -#![feature(array_chunks)] #![feature(array_into_iter_constructors)] #![feature(array_windows)] #![feature(ascii_char)] diff --git a/library/alloc/src/raw_vec/mod.rs b/library/alloc/src/raw_vec/mod.rs index 3e006a2d1bdf1..40716755aad34 100644 --- a/library/alloc/src/raw_vec/mod.rs +++ b/library/alloc/src/raw_vec/mod.rs @@ -761,7 +761,7 @@ impl RawVecInner { } // not marked inline(never) since we want optimizers to be able to observe the specifics of this -// function, see tests/codegen/vec-reserve-extend.rs. +// function, see tests/codegen-llvm/vec-reserve-extend.rs. #[cold] fn finish_grow( new_layout: Layout, diff --git a/library/alloc/src/slice.rs b/library/alloc/src/slice.rs index b4da56578c894..ce9f967cc387a 100644 --- a/library/alloc/src/slice.rs +++ b/library/alloc/src/slice.rs @@ -16,10 +16,6 @@ use core::cmp::Ordering::{self, Less}; use core::mem::MaybeUninit; #[cfg(not(no_global_oom_handling))] use core::ptr; -#[unstable(feature = "array_chunks", issue = "74985")] -pub use core::slice::ArrayChunks; -#[unstable(feature = "array_chunks", issue = "74985")] -pub use core::slice::ArrayChunksMut; #[unstable(feature = "array_windows", issue = "75027")] pub use core::slice::ArrayWindows; #[stable(feature = "inherent_ascii_escape", since = "1.60.0")] diff --git a/library/alloc/src/string.rs b/library/alloc/src/string.rs index a189c00a6b61d..d58240f3051e0 100644 --- a/library/alloc/src/string.rs +++ b/library/alloc/src/string.rs @@ -787,12 +787,12 @@ impl String { #[cfg(not(no_global_oom_handling))] #[unstable(feature = "str_from_utf16_endian", issue = "116258")] pub fn from_utf16le(v: &[u8]) -> Result { - if v.len() % 2 != 0 { + let (chunks, []) = v.as_chunks::<2>() else { return Err(FromUtf16Error(())); - } + }; match (cfg!(target_endian = "little"), unsafe { v.align_to::() }) { (true, ([], v, [])) => Self::from_utf16(v), - _ => char::decode_utf16(v.array_chunks::<2>().copied().map(u16::from_le_bytes)) + _ => char::decode_utf16(chunks.iter().copied().map(u16::from_le_bytes)) .collect::>() .map_err(|_| FromUtf16Error(())), } @@ -830,11 +830,11 @@ impl String { (true, ([], v, [])) => Self::from_utf16_lossy(v), (true, ([], v, [_remainder])) => Self::from_utf16_lossy(v) + "\u{FFFD}", _ => { - let mut iter = v.array_chunks::<2>(); - let string = char::decode_utf16(iter.by_ref().copied().map(u16::from_le_bytes)) + let (chunks, remainder) = v.as_chunks::<2>(); + let string = char::decode_utf16(chunks.iter().copied().map(u16::from_le_bytes)) .map(|r| r.unwrap_or(char::REPLACEMENT_CHARACTER)) .collect(); - if iter.remainder().is_empty() { string } else { string + "\u{FFFD}" } + if remainder.is_empty() { string } else { string + "\u{FFFD}" } } } } @@ -862,12 +862,12 @@ impl String { #[cfg(not(no_global_oom_handling))] #[unstable(feature = "str_from_utf16_endian", issue = "116258")] pub fn from_utf16be(v: &[u8]) -> Result { - if v.len() % 2 != 0 { + let (chunks, []) = v.as_chunks::<2>() else { return Err(FromUtf16Error(())); - } + }; match (cfg!(target_endian = "big"), unsafe { v.align_to::() }) { (true, ([], v, [])) => Self::from_utf16(v), - _ => char::decode_utf16(v.array_chunks::<2>().copied().map(u16::from_be_bytes)) + _ => 
char::decode_utf16(chunks.iter().copied().map(u16::from_be_bytes)) .collect::>() .map_err(|_| FromUtf16Error(())), } @@ -905,11 +905,11 @@ impl String { (true, ([], v, [])) => Self::from_utf16_lossy(v), (true, ([], v, [_remainder])) => Self::from_utf16_lossy(v) + "\u{FFFD}", _ => { - let mut iter = v.array_chunks::<2>(); - let string = char::decode_utf16(iter.by_ref().copied().map(u16::from_be_bytes)) + let (chunks, remainder) = v.as_chunks::<2>(); + let string = char::decode_utf16(chunks.iter().copied().map(u16::from_be_bytes)) .map(|r| r.unwrap_or(char::REPLACEMENT_CHARACTER)) .collect(); - if iter.remainder().is_empty() { string } else { string + "\u{FFFD}" } + if remainder.is_empty() { string } else { string + "\u{FFFD}" } } } } diff --git a/library/compiler-builtins/.github/workflows/rustc-pull.yml b/library/compiler-builtins/.github/workflows/rustc-pull.yml new file mode 100644 index 0000000000000..ba698492e42a7 --- /dev/null +++ b/library/compiler-builtins/.github/workflows/rustc-pull.yml @@ -0,0 +1,23 @@ +# Perform a subtree sync (pull) using the josh-sync tool once every few days (or on demand). +name: rustc-pull + +on: + workflow_dispatch: + schedule: + # Run at 04:00 UTC every Monday and Thursday + - cron: '0 4 * * 1,4' + +jobs: + pull: + if: github.repository == 'rust-lang/compiler-builtins' + uses: rust-lang/josh-sync/.github/workflows/rustc-pull.yml@main + with: + # https://rust-lang.zulipchat.com/#narrow/channel/219381-t-libs/topic/compiler-builtins.20subtree.20sync.20automation/with/528482375 + zulip-stream-id: 219381 + zulip-topic: 'compiler-builtins subtree sync automation' + zulip-bot-email: "compiler-builtins-ci-bot@rust-lang.zulipchat.com" + pr-base-branch: master + branch-name: rustc-pull + secrets: + zulip-api-token: ${{ secrets.ZULIP_API_TOKEN }} + token: ${{ secrets.GITHUB_TOKEN }} diff --git a/library/compiler-builtins/CONTRIBUTING.md b/library/compiler-builtins/CONTRIBUTING.md index 9f67cfc31571d..9ae4f893c60d1 100644 --- a/library/compiler-builtins/CONTRIBUTING.md +++ b/library/compiler-builtins/CONTRIBUTING.md @@ -165,3 +165,12 @@ cargo bench --no-default-features \ [`iai-callgrind-runner`]: https://crates.io/crates/iai-callgrind-runner [Valgrind]: https://valgrind.org/ + +## Subtree synchronization + +`compiler-builtins` is included as a [Josh subtree] in the main compiler +repository (`rust-lang/rust`). You can find a guide on how to create synchronization +(pull and push) PRs at the [`rustc-dev-guide` page]. + +[Josh subtree]: https://rustc-dev-guide.rust-lang.org/external-repos.html#josh-subtrees +[`rustc-dev-guide` page]: https://rustc-dev-guide.rust-lang.org/external-repos.html#synchronizing-a-josh-subtree diff --git a/library/compiler-builtins/Cargo.toml b/library/compiler-builtins/Cargo.toml index 41350c6cb9909..956d738f3b1f1 100644 --- a/library/compiler-builtins/Cargo.toml +++ b/library/compiler-builtins/Cargo.toml @@ -3,7 +3,6 @@ resolver = "2" members = [ "builtins-shim", "builtins-test", - "crates/josh-sync", "crates/libm-macros", "crates/musl-math-sys", "crates/panic-handler", diff --git a/library/compiler-builtins/builtins-test/Cargo.toml b/library/compiler-builtins/builtins-test/Cargo.toml index 093d4633f8746..00a9d8579d119 100644 --- a/library/compiler-builtins/builtins-test/Cargo.toml +++ b/library/compiler-builtins/builtins-test/Cargo.toml @@ -12,9 +12,9 @@ license = "MIT AND Apache-2.0 WITH LLVM-exception AND (MIT OR Apache-2.0)" # `xoshiro128**` is used for its quality, size, and speed at generating `u32` shift amounts. 
rand_xoshiro = "0.7" # To compare float builtins against -rustc_apfloat = "0.2.2" +rustc_apfloat = "0.2.3" # Really a dev dependency, but dev dependencies can't be optional -iai-callgrind = { version = "0.14.1", optional = true } +iai-callgrind = { version = "0.15.2", optional = true } [dependencies.compiler_builtins] path = "../builtins-shim" diff --git a/library/compiler-builtins/ci/bench-icount.sh b/library/compiler-builtins/ci/bench-icount.sh index d2baebb52d8fd..12228b9da971b 100755 --- a/library/compiler-builtins/ci/bench-icount.sh +++ b/library/compiler-builtins/ci/bench-icount.sh @@ -28,7 +28,7 @@ function run_icount_benchmarks() { iai_args=( "--home" "$(pwd)/$iai_home" - "--regression=ir=5.0" + "--callgrind-limits=ir=5.0" "--save-summary" ) diff --git a/library/compiler-builtins/compiler-builtins/src/mem/impls.rs b/library/compiler-builtins/compiler-builtins/src/mem/impls.rs index 14a4787485dc8..da16dee25ce54 100644 --- a/library/compiler-builtins/compiler-builtins/src/mem/impls.rs +++ b/library/compiler-builtins/compiler-builtins/src/mem/impls.rs @@ -15,6 +15,7 @@ // this use. Of course this is not a guarantee that such use will work, it just means that this // crate doing wrapping pointer arithmetic with a method that must not wrap won't be the problem if // something does go wrong at runtime. +use core::ffi::c_int; use core::intrinsics::likely; const WORD_SIZE: usize = core::mem::size_of::(); @@ -384,13 +385,13 @@ pub unsafe fn set_bytes(mut s: *mut u8, c: u8, mut n: usize) { } #[inline(always)] -pub unsafe fn compare_bytes(s1: *const u8, s2: *const u8, n: usize) -> i32 { +pub unsafe fn compare_bytes(s1: *const u8, s2: *const u8, n: usize) -> c_int { let mut i = 0; while i < n { let a = *s1.wrapping_add(i); let b = *s2.wrapping_add(i); if a != b { - return a as i32 - b as i32; + return c_int::from(a) - c_int::from(b); } i += 1; } diff --git a/library/compiler-builtins/compiler-builtins/src/mem/mod.rs b/library/compiler-builtins/compiler-builtins/src/mem/mod.rs index 6828f3804e0fe..a227f60a2949b 100644 --- a/library/compiler-builtins/compiler-builtins/src/mem/mod.rs +++ b/library/compiler-builtins/compiler-builtins/src/mem/mod.rs @@ -3,13 +3,6 @@ // FIXME(e2024): this eventually needs to be removed. #![allow(unsafe_op_in_unsafe_fn)] -#[allow(warnings)] -#[cfg(target_pointer_width = "16")] -type c_int = i16; -#[allow(warnings)] -#[cfg(not(target_pointer_width = "16"))] -type c_int = i32; - // memcpy/memmove/memset have optimized implementations on some architectures #[cfg_attr( all(not(feature = "no-asm"), target_arch = "x86_64"), @@ -38,18 +31,18 @@ intrinsics! 
{ } #[mem_builtin] - pub unsafe extern "C" fn memset(s: *mut u8, c: crate::mem::c_int, n: usize) -> *mut u8 { + pub unsafe extern "C" fn memset(s: *mut u8, c: core::ffi::c_int, n: usize) -> *mut u8 { impls::set_bytes(s, c as u8, n); s } #[mem_builtin] - pub unsafe extern "C" fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> i32 { + pub unsafe extern "C" fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> core::ffi::c_int { impls::compare_bytes(s1, s2, n) } #[mem_builtin] - pub unsafe extern "C" fn bcmp(s1: *const u8, s2: *const u8, n: usize) -> i32 { + pub unsafe extern "C" fn bcmp(s1: *const u8, s2: *const u8, n: usize) -> core::ffi::c_int { memcmp(s1, s2, n) } diff --git a/library/compiler-builtins/crates/josh-sync/Cargo.toml b/library/compiler-builtins/crates/josh-sync/Cargo.toml deleted file mode 100644 index 8e2e891db5426..0000000000000 --- a/library/compiler-builtins/crates/josh-sync/Cargo.toml +++ /dev/null @@ -1,8 +0,0 @@ -[package] -name = "josh-sync" -edition = "2024" -publish = false - -[dependencies] -directories = "6.0.0" -regex-lite = "0.1.6" diff --git a/library/compiler-builtins/crates/josh-sync/src/main.rs b/library/compiler-builtins/crates/josh-sync/src/main.rs deleted file mode 100644 index 7f0b11900337b..0000000000000 --- a/library/compiler-builtins/crates/josh-sync/src/main.rs +++ /dev/null @@ -1,45 +0,0 @@ -use std::io::{Read, Write}; -use std::process::exit; -use std::{env, io}; - -use crate::sync::{GitSync, Josh}; - -mod sync; - -const USAGE: &str = r#"Utility for synchroniing compiler-builtins with rust-lang/rust - -Usage: - - josh-sync rustc-pull - - Pull from rust-lang/rust to compiler-builtins. Creates a commit - updating the version file, followed by a merge commit. - - josh-sync rustc-push GITHUB_USERNAME [BRANCH] - - Create a branch off of rust-lang/rust updating compiler-builtins. -"#; - -fn main() { - let sync = GitSync::from_current_dir(); - - // Collect args, then recollect as str refs so we can match on them - let args: Vec<_> = env::args().collect(); - let args: Vec<&str> = args.iter().map(String::as_str).collect(); - - match args.as_slice()[1..] 
{ - ["rustc-pull"] => sync.rustc_pull(None), - ["rustc-push", github_user, branch] => sync.rustc_push(github_user, Some(branch)), - ["rustc-push", github_user] => sync.rustc_push(github_user, None), - ["start-josh"] => { - let _josh = Josh::start(); - println!("press enter to stop"); - io::stdout().flush().unwrap(); - let _ = io::stdin().read(&mut [0u8]).unwrap(); - } - _ => { - println!("{USAGE}"); - exit(1); - } - } -} diff --git a/library/compiler-builtins/crates/josh-sync/src/sync.rs b/library/compiler-builtins/crates/josh-sync/src/sync.rs deleted file mode 100644 index 2d89d2d1cea2f..0000000000000 --- a/library/compiler-builtins/crates/josh-sync/src/sync.rs +++ /dev/null @@ -1,401 +0,0 @@ -use std::borrow::Cow; -use std::net::{SocketAddr, TcpStream}; -use std::process::{Command, Stdio, exit}; -use std::time::Duration; -use std::{env, fs, process, thread}; - -use regex_lite::Regex; - -const JOSH_PORT: u16 = 42042; -const DEFAULT_PR_BRANCH: &str = "update-builtins"; - -pub struct GitSync { - upstream_repo: String, - upstream_ref: String, - upstream_url: String, - josh_filter: String, - josh_url_base: String, -} - -/// This code was adapted from the miri repository, via the rustc-dev-guide -/// () -impl GitSync { - pub fn from_current_dir() -> Self { - let upstream_repo = - env::var("UPSTREAM_ORG").unwrap_or_else(|_| "rust-lang".to_owned()) + "/rust"; - - Self { - upstream_url: format!("https://github.com/{upstream_repo}"), - upstream_repo, - upstream_ref: env::var("UPSTREAM_REF").unwrap_or_else(|_| "HEAD".to_owned()), - josh_filter: ":/library/compiler-builtins".to_owned(), - josh_url_base: format!("http://localhost:{JOSH_PORT}"), - } - } - - /// Pull from rust-lang/rust to compiler-builtins. - pub fn rustc_pull(&self, commit: Option) { - let Self { - upstream_ref, - upstream_url, - upstream_repo, - .. - } = self; - - let new_upstream_base = commit.unwrap_or_else(|| { - let out = check_output(["git", "ls-remote", upstream_url, upstream_ref]); - out.split_whitespace() - .next() - .unwrap_or_else(|| panic!("could not split output: '{out}'")) - .to_owned() - }); - - ensure_clean(); - - // Make sure josh is running. - let _josh = Josh::start(); - let josh_url_filtered = self.josh_url( - &self.upstream_repo, - Some(&new_upstream_base), - Some(&self.josh_filter), - ); - - let previous_upstream_base = fs::read_to_string("rust-version") - .expect("failed to read `rust-version`") - .trim() - .to_string(); - assert_ne!(previous_upstream_base, new_upstream_base, "nothing to pull"); - - let orig_head = check_output(["git", "rev-parse", "HEAD"]); - println!("original upstream base: {previous_upstream_base}"); - println!("new upstream base: {new_upstream_base}"); - println!("original HEAD: {orig_head}"); - - // Fetch the latest upstream HEAD so we can get a summary. Use the Josh URL for caching. - run([ - "git", - "fetch", - &self.josh_url(&self.upstream_repo, Some(&new_upstream_base), Some(":/")), - &new_upstream_base, - "--depth=1", - ]); - let new_summary = check_output(["git", "log", "-1", "--format=%h %s", &new_upstream_base]); - let new_summary = replace_references(&new_summary, &self.upstream_repo); - - // Update rust-version file. As a separate commit, since making it part of - // the merge has confused the heck out of josh in the past. - // We pass `--no-verify` to avoid running git hooks. - // We do this before the merge so that if there are merge conflicts, we have - // the right rust-version file while resolving them. 
- fs::write("rust-version", format!("{new_upstream_base}\n")) - .expect("failed to write rust-version"); - - let prep_message = format!( - "Update the upstream Rust version\n\n\ - To prepare for merging from {upstream_repo}, set the version file to:\n\n \ - {new_summary}\n\ - ", - ); - run([ - "git", - "commit", - "rust-version", - "--no-verify", - "-m", - &prep_message, - ]); - - // Fetch given rustc commit. - run(["git", "fetch", &josh_url_filtered]); - let incoming_ref = check_output(["git", "rev-parse", "FETCH_HEAD"]); - println!("incoming ref: {incoming_ref}"); - - let merge_message = format!( - "Merge ref '{upstream_head_short}{filter}' from {upstream_url}\n\n\ - Pull recent changes from {upstream_repo} via Josh.\n\n\ - Upstream ref: {new_upstream_base}\n\ - Filtered ref: {incoming_ref}\n\ - ", - upstream_head_short = &new_upstream_base[..12], - filter = self.josh_filter - ); - - // This should not add any new root commits. So count those before and after merging. - let num_roots = || -> u32 { - let out = check_output(["git", "rev-list", "HEAD", "--max-parents=0", "--count"]); - out.trim() - .parse::() - .unwrap_or_else(|e| panic!("failed to parse `{out}`: {e}")) - }; - let num_roots_before = num_roots(); - - let pre_merge_sha = check_output(["git", "rev-parse", "HEAD"]); - println!("pre-merge HEAD: {pre_merge_sha}"); - - // Merge the fetched commit. - run([ - "git", - "merge", - "FETCH_HEAD", - "--no-verify", - "--no-ff", - "-m", - &merge_message, - ]); - - let current_sha = check_output(["git", "rev-parse", "HEAD"]); - if current_sha == pre_merge_sha { - run(["git", "reset", "--hard", &orig_head]); - eprintln!( - "No merge was performed, no changes to pull were found. \ - Rolled back the preparation commit." - ); - exit(1); - } - - // Check that the number of roots did not increase. - assert_eq!( - num_roots(), - num_roots_before, - "Josh created a new root commit. This is probably not the history you want." - ); - } - - /// Construct an update to rust-lang/rust from compiler-builtins. - pub fn rustc_push(&self, github_user: &str, branch: Option<&str>) { - let Self { - josh_filter, - upstream_url, - .. - } = self; - - let branch = branch.unwrap_or(DEFAULT_PR_BRANCH); - let josh_url = self.josh_url(&format!("{github_user}/rust"), None, Some(josh_filter)); - let user_upstream_url = format!("git@github.com:{github_user}/rust.git"); - - let Ok(rustc_git) = env::var("RUSTC_GIT") else { - panic!("the RUSTC_GIT environment variable must be set to a rust-lang/rust checkout") - }; - - ensure_clean(); - let base = fs::read_to_string("rust-version") - .expect("failed to read `rust-version`") - .trim() - .to_string(); - - // Make sure josh is running. - let _josh = Josh::start(); - - // Prepare the branch. Pushing works much better if we use as base exactly - // the commit that we pulled from last time, so we use the `rust-version` - // file to find out which commit that would be. - println!("Preparing {github_user}/rust (base: {base})..."); - - if Command::new("git") - .args(["-C", &rustc_git, "fetch", &user_upstream_url, branch]) - .output() // capture output - .expect("could not run fetch") - .status - .success() - { - panic!( - "The branch '{branch}' seems to already exist in '{user_upstream_url}'. \ - Please delete it and try again." 
- ); - } - - run(["git", "-C", &rustc_git, "fetch", upstream_url, &base]); - - run_cfg("git", |c| { - c.args([ - "-C", - &rustc_git, - "push", - &user_upstream_url, - &format!("{base}:refs/heads/{branch}"), - ]) - .stdout(Stdio::null()) - .stderr(Stdio::null()) // silence the "create GitHub PR" message - }); - println!("pushed PR branch"); - - // Do the actual push. - println!("Pushing changes..."); - run(["git", "push", &josh_url, &format!("HEAD:{branch}")]); - println!(); - - // Do a round-trip check to make sure the push worked as expected. - run(["git", "fetch", &josh_url, branch]); - - let head = check_output(["git", "rev-parse", "HEAD"]); - let fetch_head = check_output(["git", "rev-parse", "FETCH_HEAD"]); - assert_eq!( - head, fetch_head, - "Josh created a non-roundtrip push! Do NOT merge this into rustc!\n\ - Expected {head}, got {fetch_head}." - ); - println!( - "Confirmed that the push round-trips back to compiler-builtins properly. Please \ - create a rustc PR:" - ); - // Open PR with `subtree update` title to silence the `no-merges` triagebot check - println!( - " {upstream_url}/compare/{github_user}:{branch}?quick_pull=1\ - &title=Update%20the%20%60compiler-builtins%60%20subtree\ - &body=Update%20the%20Josh%20subtree%20to%20https%3A%2F%2Fgithub.com%2Frust-lang%2F\ - compiler-builtins%2Fcommit%2F{head_short}.%0A%0Ar%3F%20%40ghost", - head_short = &head[..12], - ); - } - - /// Construct a url to the local Josh server with (optionally) - fn josh_url(&self, repo: &str, rev: Option<&str>, filter: Option<&str>) -> String { - format!( - "{base}/{repo}.git{at}{rev}{filter}{filt_git}", - base = self.josh_url_base, - at = if rev.is_some() { "@" } else { "" }, - rev = rev.unwrap_or_default(), - filter = filter.unwrap_or_default(), - filt_git = if filter.is_some() { ".git" } else { "" } - ) - } -} - -/// Fail if there are files that need to be checked in. -fn ensure_clean() { - let read = check_output(["git", "status", "--untracked-files=no", "--porcelain"]); - assert!( - read.is_empty(), - "working directory must be clean before performing rustc pull" - ); -} - -/* Helpers for running commands with logged invocations */ - -/// Run a command from an array, passing its output through. -fn run<'a, Args: AsRef<[&'a str]>>(l: Args) { - let l = l.as_ref(); - run_cfg(l[0], |c| c.args(&l[1..])); -} - -/// Run a command from an array, collecting its output. -fn check_output<'a, Args: AsRef<[&'a str]>>(l: Args) -> String { - let l = l.as_ref(); - check_output_cfg(l[0], |c| c.args(&l[1..])) -} - -/// [`run`] with configuration. -fn run_cfg(prog: &str, f: impl FnOnce(&mut Command) -> &mut Command) { - // self.read(l.as_ref()); - check_output_cfg(prog, |c| f(c.stdout(Stdio::inherit()))); -} - -/// [`read`] with configuration. All shell helpers print the command and pass stderr. -fn check_output_cfg(prog: &str, f: impl FnOnce(&mut Command) -> &mut Command) -> String { - let mut cmd = Command::new(prog); - cmd.stderr(Stdio::inherit()); - f(&mut cmd); - eprintln!("+ {cmd:?}"); - let out = cmd.output().expect("command failed"); - assert!(out.status.success()); - String::from_utf8(out.stdout.trim_ascii().to_vec()).expect("non-UTF8 output") -} - -/// Replace `#1234`-style issue/PR references with `repo#1234` to ensure links work across -/// repositories. -fn replace_references<'a>(s: &'a str, repo: &str) -> Cow<'a, str> { - let re = Regex::new(r"\B(?P#\d+)\b").unwrap(); - re.replace(s, &format!("{repo}$id")) -} - -/// Create a wrapper that stops Josh on drop. 
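The helpers in the deleted `josh-sync` tool shell out to `git` and capture trimmed stdout. For reference, a self-contained sketch of that `check_output` pattern using only `std::process` (a hypothetical free function for illustration, not the removed code itself):

```rust
use std::process::{Command, Stdio};

// Run a command, pass stderr through, and return its trimmed stdout.
// Panics if the command cannot be spawned or exits unsuccessfully,
// mirroring how the removed helper treated failures as fatal.
fn check_output(args: &[&str]) -> String {
    let mut cmd = Command::new(args[0]);
    cmd.args(&args[1..]).stderr(Stdio::inherit());
    eprintln!("+ {cmd:?}");
    let out = cmd.output().expect("failed to spawn command");
    assert!(out.status.success(), "command failed: {}", out.status);
    String::from_utf8(out.stdout)
        .expect("non-UTF8 output")
        .trim()
        .to_owned()
}

fn main() {
    let head = check_output(&["git", "rev-parse", "HEAD"]);
    println!("HEAD is {head}");
}
```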
-pub struct Josh(process::Child); - -impl Josh { - pub fn start() -> Self { - // Determine cache directory. - let user_dirs = - directories::ProjectDirs::from("org", "rust-lang", "rustc-compiler-builtins-josh") - .unwrap(); - let local_dir = user_dirs.cache_dir().to_owned(); - - // Start josh, silencing its output. - #[expect(clippy::zombie_processes, reason = "clippy can't handle the loop")] - let josh = process::Command::new("josh-proxy") - .arg("--local") - .arg(local_dir) - .args([ - "--remote=https://github.com", - &format!("--port={JOSH_PORT}"), - "--no-background", - ]) - .stdout(Stdio::null()) - .stderr(Stdio::null()) - .spawn() - .expect("failed to start josh-proxy, make sure it is installed"); - - // Wait until the port is open. We try every 10ms until 1s passed. - for _ in 0..100 { - // This will generally fail immediately when the port is still closed. - let addr = SocketAddr::from(([127, 0, 0, 1], JOSH_PORT)); - let josh_ready = TcpStream::connect_timeout(&addr, Duration::from_millis(1)); - - if josh_ready.is_ok() { - println!("josh up and running"); - return Josh(josh); - } - - // Not ready yet. - thread::sleep(Duration::from_millis(10)); - } - panic!("Even after waiting for 1s, josh-proxy is still not available.") - } -} - -impl Drop for Josh { - fn drop(&mut self) { - if cfg!(unix) { - // Try to gracefully shut it down. - Command::new("kill") - .args(["-s", "INT", &self.0.id().to_string()]) - .output() - .expect("failed to SIGINT josh-proxy"); - // Sadly there is no "wait with timeout"... so we just give it some time to finish. - thread::sleep(Duration::from_millis(100)); - // Now hopefully it is gone. - if self - .0 - .try_wait() - .expect("failed to wait for josh-proxy") - .is_some() - { - return; - } - } - // If that didn't work (or we're not on Unix), kill it hard. - eprintln!( - "I have to kill josh-proxy the hard way, let's hope this does not \ - break anything." 
- ); - self.0.kill().expect("failed to SIGKILL josh-proxy"); - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_replace() { - assert_eq!(replace_references("#1234", "r-l/rust"), "r-l/rust#1234"); - assert_eq!(replace_references("#1234x", "r-l/rust"), "#1234x"); - assert_eq!( - replace_references("merge #1234", "r-l/rust"), - "merge r-l/rust#1234" - ); - assert_eq!( - replace_references("foo/bar#1234", "r-l/rust"), - "foo/bar#1234" - ); - } -} diff --git a/library/compiler-builtins/crates/libm-macros/Cargo.toml b/library/compiler-builtins/crates/libm-macros/Cargo.toml index 6bbf47784ff91..100a8d0ec30e4 100644 --- a/library/compiler-builtins/crates/libm-macros/Cargo.toml +++ b/library/compiler-builtins/crates/libm-macros/Cargo.toml @@ -12,7 +12,7 @@ proc-macro = true heck = "0.5.0" proc-macro2 = "1.0.95" quote = "1.0.40" -syn = { version = "2.0.101", features = ["full", "extra-traits", "visit-mut"] } +syn = { version = "2.0.104", features = ["full", "extra-traits", "visit-mut"] } [lints.rust] # Values used during testing diff --git a/library/compiler-builtins/crates/musl-math-sys/Cargo.toml b/library/compiler-builtins/crates/musl-math-sys/Cargo.toml index 3b88117343b62..39f6fa9065bd9 100644 --- a/library/compiler-builtins/crates/musl-math-sys/Cargo.toml +++ b/library/compiler-builtins/crates/musl-math-sys/Cargo.toml @@ -11,4 +11,4 @@ license = "MIT OR Apache-2.0" libm = { path = "../../libm" } [build-dependencies] -cc = "1.2.25" +cc = "1.2.29" diff --git a/library/compiler-builtins/josh-sync.toml b/library/compiler-builtins/josh-sync.toml new file mode 100644 index 0000000000000..599a12af8e5d4 --- /dev/null +++ b/library/compiler-builtins/josh-sync.toml @@ -0,0 +1,3 @@ +org = "rust-lang" +repo = "compiler-builtins" +path = "library/compiler-builtins" diff --git a/library/compiler-builtins/libm-test/Cargo.toml b/library/compiler-builtins/libm-test/Cargo.toml index 05fcc3234e00b..0af6b0c1da5ca 100644 --- a/library/compiler-builtins/libm-test/Cargo.toml +++ b/library/compiler-builtins/libm-test/Cargo.toml @@ -31,8 +31,8 @@ short-benchmarks = [] anyhow = "1.0.98" # This is not directly used but is required so we can enable `gmp-mpfr-sys/force-cross`. 
gmp-mpfr-sys = { version = "1.6.5", optional = true, default-features = false } -iai-callgrind = { version = "0.14.1", optional = true } -indicatif = { version = "0.17.11", default-features = false } +iai-callgrind = { version = "0.15.2", optional = true } +indicatif = { version = "0.18.0", default-features = false } libm = { path = "../libm", features = ["unstable-public-internals"] } libm-macros = { path = "../crates/libm-macros" } musl-math-sys = { path = "../crates/musl-math-sys", optional = true } diff --git a/library/compiler-builtins/libm/src/math/support/float_traits.rs b/library/compiler-builtins/libm/src/math/support/float_traits.rs index c3e7eeec245c8..fb790e696159f 100644 --- a/library/compiler-builtins/libm/src/math/support/float_traits.rs +++ b/library/compiler-builtins/libm/src/math/support/float_traits.rs @@ -363,6 +363,7 @@ pub const fn f32_from_bits(bits: u32) -> f32 { } /// `f32::to_bits` +#[allow(dead_code)] // workaround for false positive RUST-144060 #[allow(unnecessary_transmutes)] // lint appears in newer versions of Rust pub const fn f32_to_bits(x: f32) -> u32 { // SAFETY: POD cast with no preconditions @@ -377,6 +378,7 @@ pub const fn f64_from_bits(bits: u64) -> f64 { } /// `f64::to_bits` +#[allow(dead_code)] // workaround for false positive RUST-144060 #[allow(unnecessary_transmutes)] // lint appears in newer versions of Rust pub const fn f64_to_bits(x: f64) -> u64 { // SAFETY: POD cast with no preconditions diff --git a/library/compiler-builtins/rust-version b/library/compiler-builtins/rust-version index 73183983599ff..a4db05a879683 100644 --- a/library/compiler-builtins/rust-version +++ b/library/compiler-builtins/rust-version @@ -1 +1 @@ -d087f112b7d1323446c7b39a8b616aee7fa56b3d +82310651b93a594a3fd69015e1562186a080d94c diff --git a/library/compiler-builtins/triagebot.toml b/library/compiler-builtins/triagebot.toml index ecc05da019517..8a2356c2b1c06 100644 --- a/library/compiler-builtins/triagebot.toml +++ b/library/compiler-builtins/triagebot.toml @@ -4,7 +4,7 @@ # Warns when a PR contains merge commits # Documentation at: https://forge.rust-lang.org/triagebot/no-merge.html [no-merges] -exclude_titles = ["Update from"] +exclude_titles = ["Rustc pull update"] # Canonicalize issue numbers to avoid closing the wrong issue # when commits are included in subtrees, as well as warning links in commits. @@ -19,3 +19,6 @@ check-commits = false # Enable issue transfers within the org # Documentation at: https://forge.rust-lang.org/triagebot/transfer.html [transfer] + +# Automatically close and reopen PRs made by bots to run CI on them +[bot-pull-requests] diff --git a/library/core/src/array/mod.rs b/library/core/src/array/mod.rs index 62203e132b70c..1c23218552aa1 100644 --- a/library/core/src/array/mod.rs +++ b/library/core/src/array/mod.rs @@ -198,7 +198,8 @@ impl Error for TryFromSliceError { } #[stable(feature = "try_from_slice_error", since = "1.36.0")] -impl From for TryFromSliceError { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const From for TryFromSliceError { fn from(x: Infallible) -> TryFromSliceError { match x {} } diff --git a/library/core/src/ascii/ascii_char.rs b/library/core/src/ascii/ascii_char.rs index 856a8632f269d..63e77df7cf317 100644 --- a/library/core/src/ascii/ascii_char.rs +++ b/library/core/src/ascii/ascii_char.rs @@ -554,7 +554,8 @@ macro_rules! 
into_int_impl { ($($ty:ty)*) => { $( #[unstable(feature = "ascii_char", issue = "110998")] - impl From for $ty { + #[rustc_const_unstable(feature = "const_try", issue = "74935")] + impl const From for $ty { #[inline] fn from(chr: AsciiChar) -> $ty { chr as u8 as $ty diff --git a/library/core/src/borrow.rs b/library/core/src/borrow.rs index ccb1cc4e974d6..da05f236d2fef 100644 --- a/library/core/src/borrow.rs +++ b/library/core/src/borrow.rs @@ -223,20 +223,20 @@ impl BorrowMut for T { #[stable(feature = "rust1", since = "1.0.0")] impl Borrow for &T { fn borrow(&self) -> &T { - &**self + self } } #[stable(feature = "rust1", since = "1.0.0")] impl Borrow for &mut T { fn borrow(&self) -> &T { - &**self + self } } #[stable(feature = "rust1", since = "1.0.0")] impl BorrowMut for &mut T { fn borrow_mut(&mut self) -> &mut T { - &mut **self + self } } diff --git a/library/core/src/char/convert.rs b/library/core/src/char/convert.rs index 14f6f10d3c9d9..bcea5082efa05 100644 --- a/library/core/src/char/convert.rs +++ b/library/core/src/char/convert.rs @@ -42,7 +42,8 @@ pub(super) const unsafe fn from_u32_unchecked(i: u32) -> char { } #[stable(feature = "char_convert", since = "1.13.0")] -impl From for u32 { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const From for u32 { /// Converts a [`char`] into a [`u32`]. /// /// # Examples @@ -59,7 +60,8 @@ impl From for u32 { } #[stable(feature = "more_char_conversions", since = "1.51.0")] -impl From for u64 { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const From for u64 { /// Converts a [`char`] into a [`u64`]. /// /// # Examples @@ -78,7 +80,8 @@ impl From for u64 { } #[stable(feature = "more_char_conversions", since = "1.51.0")] -impl From for u128 { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const From for u128 { /// Converts a [`char`] into a [`u128`]. /// /// # Examples @@ -163,7 +166,8 @@ impl TryFrom for u16 { /// for a superset of Windows-1252 that fills the remaining blanks with corresponding /// C0 and C1 control codes. #[stable(feature = "char_convert", since = "1.13.0")] -impl From for char { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const From for char { /// Converts a [`u8`] into a [`char`]. /// /// # Examples @@ -253,7 +257,8 @@ const fn char_try_from_u32(i: u32) -> Result { } #[stable(feature = "try_from", since = "1.34.0")] -impl TryFrom for char { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const TryFrom for char { type Error = CharTryFromError; #[inline] diff --git a/library/core/src/clone.rs b/library/core/src/clone.rs index a34d1b4a06497..51d037ddfd2cc 100644 --- a/library/core/src/clone.rs +++ b/library/core/src/clone.rs @@ -590,7 +590,7 @@ mod impls { #[inline(always)] #[rustc_diagnostic_item = "noop_method_clone"] fn clone(&self) -> Self { - *self + self } } diff --git a/library/core/src/convert/mod.rs b/library/core/src/convert/mod.rs index 38381dbdf2303..220a24caf09ee 100644 --- a/library/core/src/convert/mod.rs +++ b/library/core/src/convert/mod.rs @@ -216,6 +216,8 @@ pub const fn identity(x: T) -> T { /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[rustc_diagnostic_item = "AsRef"] +#[const_trait] +#[rustc_const_unstable(feature = "const_try", issue = "74935")] pub trait AsRef: PointeeSized { /// Converts this type into a shared reference of the (usually inferred) input type. 
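The `char` conversion impls above only gain `const`; their runtime behavior is unchanged. For reference, a small stable example of the `u32`/`char` round trip those impls provide (no nightly features needed):

```rust
fn main() {
    // From<char> for u32 is infallible: every char is a valid scalar value.
    let cp = u32::from('🦀');
    assert_eq!(cp, 0x1F980);

    // TryFrom<u32> for char rejects surrogates and out-of-range values.
    assert_eq!(char::try_from(cp), Ok('🦀'));
    assert!(char::try_from(0xD800u32).is_err()); // lone surrogate
    assert!(char::try_from(0x11_0000u32).is_err()); // beyond U+10FFFF
}
```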
#[stable(feature = "rust1", since = "1.0.0")] @@ -367,6 +369,8 @@ pub trait AsRef: PointeeSized { /// `&mut Vec`, for example, is the better choice (callers need to pass the correct type then). #[stable(feature = "rust1", since = "1.0.0")] #[rustc_diagnostic_item = "AsMut"] +#[const_trait] +#[rustc_const_unstable(feature = "const_try", issue = "74935")] pub trait AsMut: PointeeSized { /// Converts this type into a mutable reference of the (usually inferred) input type. #[stable(feature = "rust1", since = "1.0.0")] @@ -710,9 +714,10 @@ pub trait TryFrom: Sized { // As lifts over & #[stable(feature = "rust1", since = "1.0.0")] -impl AsRef for &T +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const AsRef for &T where - T: AsRef, + T: ~const AsRef, { #[inline] fn as_ref(&self) -> &U { @@ -722,9 +727,10 @@ where // As lifts over &mut #[stable(feature = "rust1", since = "1.0.0")] -impl AsRef for &mut T +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const AsRef for &mut T where - T: AsRef, + T: ~const AsRef, { #[inline] fn as_ref(&self) -> &U { @@ -742,9 +748,10 @@ where // AsMut lifts over &mut #[stable(feature = "rust1", since = "1.0.0")] -impl AsMut for &mut T +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const AsMut for &mut T where - T: AsMut, + T: ~const AsMut, { #[inline] fn as_mut(&mut self) -> &mut U { @@ -840,7 +847,8 @@ where //////////////////////////////////////////////////////////////////////////////// #[stable(feature = "rust1", since = "1.0.0")] -impl AsRef<[T]> for [T] { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const AsRef<[T]> for [T] { #[inline(always)] fn as_ref(&self) -> &[T] { self @@ -848,7 +856,8 @@ impl AsRef<[T]> for [T] { } #[stable(feature = "rust1", since = "1.0.0")] -impl AsMut<[T]> for [T] { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const AsMut<[T]> for [T] { #[inline(always)] fn as_mut(&mut self) -> &mut [T] { self @@ -856,7 +865,8 @@ impl AsMut<[T]> for [T] { } #[stable(feature = "rust1", since = "1.0.0")] -impl AsRef for str { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const AsRef for str { #[inline(always)] fn as_ref(&self) -> &str { self @@ -864,7 +874,8 @@ impl AsRef for str { } #[stable(feature = "as_mut_str_for_str", since = "1.51.0")] -impl AsMut for str { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const AsMut for str { #[inline(always)] fn as_mut(&mut self) -> &mut str { self @@ -925,7 +936,8 @@ impl AsMut for str { pub enum Infallible {} #[stable(feature = "convert_infallible", since = "1.34.0")] -impl Clone for Infallible { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const Clone for Infallible { fn clone(&self) -> Infallible { match *self {} } @@ -953,7 +965,8 @@ impl Error for Infallible { } #[stable(feature = "convert_infallible", since = "1.34.0")] -impl PartialEq for Infallible { +#[rustc_const_unstable(feature = "const_cmp", issue = "143800")] +impl const PartialEq for Infallible { fn eq(&self, _: &Infallible) -> bool { match *self {} } @@ -977,7 +990,8 @@ impl Ord for Infallible { } #[stable(feature = "convert_infallible", since = "1.34.0")] -impl From for Infallible { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const From for Infallible { #[inline] fn from(x: !) 
-> Self { x diff --git a/library/core/src/convert/num.rs b/library/core/src/convert/num.rs index 38e46a9f210f9..1a30a75de7ef6 100644 --- a/library/core/src/convert/num.rs +++ b/library/core/src/convert/num.rs @@ -78,7 +78,8 @@ macro_rules! impl_from { }; ($Small:ty => $Large:ty, #[$attr:meta], $doc:expr $(,)?) => { #[$attr] - impl From<$Small> for $Large { + #[rustc_const_unstable(feature = "const_try", issue = "74935")] + impl const From<$Small> for $Large { // Rustdocs on the impl block show a "[+] show undocumented items" toggle. // Rustdocs on functions do not. #[doc = $doc] @@ -210,7 +211,8 @@ macro_rules! impl_float_from_bool { )? ) => { #[stable(feature = "float_from_bool", since = "1.68.0")] - impl From for $float { + #[rustc_const_unstable(feature = "const_try", issue = "74935")] + impl const From for $float { #[doc = concat!("Converts a [`bool`] to [`", stringify!($float),"`] losslessly.")] /// The resulting value is positive `0.0` for `false` and `1.0` for `true` values. /// @@ -260,7 +262,8 @@ impl_float_from_bool!( macro_rules! impl_try_from_unbounded { ($source:ty => $($target:ty),+) => {$( #[stable(feature = "try_from", since = "1.34.0")] - impl TryFrom<$source> for $target { + #[rustc_const_unstable(feature = "const_try", issue = "74935")] + impl const TryFrom<$source> for $target { type Error = TryFromIntError; /// Tries to create the target number type from a source @@ -278,7 +281,8 @@ macro_rules! impl_try_from_unbounded { macro_rules! impl_try_from_lower_bounded { ($source:ty => $($target:ty),+) => {$( #[stable(feature = "try_from", since = "1.34.0")] - impl TryFrom<$source> for $target { + #[rustc_const_unstable(feature = "const_try", issue = "74935")] + impl const TryFrom<$source> for $target { type Error = TryFromIntError; /// Tries to create the target number type from a source @@ -300,7 +304,8 @@ macro_rules! impl_try_from_lower_bounded { macro_rules! impl_try_from_upper_bounded { ($source:ty => $($target:ty),+) => {$( #[stable(feature = "try_from", since = "1.34.0")] - impl TryFrom<$source> for $target { + #[rustc_const_unstable(feature = "const_try", issue = "74935")] + impl const TryFrom<$source> for $target { type Error = TryFromIntError; /// Tries to create the target number type from a source @@ -322,7 +327,8 @@ macro_rules! impl_try_from_upper_bounded { macro_rules! impl_try_from_both_bounded { ($source:ty => $($target:ty),+) => {$( #[stable(feature = "try_from", since = "1.34.0")] - impl TryFrom<$source> for $target { + #[rustc_const_unstable(feature = "const_try", issue = "74935")] + impl const TryFrom<$source> for $target { type Error = TryFromIntError; /// Tries to create the target number type from a source @@ -460,7 +466,8 @@ use crate::num::NonZero; macro_rules! impl_nonzero_int_from_nonzero_int { ($Small:ty => $Large:ty) => { #[stable(feature = "nz_int_conv", since = "1.41.0")] - impl From> for NonZero<$Large> { + #[rustc_const_unstable(feature = "const_try", issue = "74935")] + impl const From> for NonZero<$Large> { // Rustdocs on the impl block show a "[+] show undocumented items" toggle. // Rustdocs on functions do not. #[doc = concat!("Converts [NonZero]\\<[", stringify!($Small), "]> ")] @@ -550,7 +557,8 @@ impl_nonzero_int_try_from_int!(isize); macro_rules! 
impl_nonzero_int_try_from_nonzero_int { ($source:ty => $($target:ty),+) => {$( #[stable(feature = "nzint_try_from_nzint_conv", since = "1.49.0")] - impl TryFrom> for NonZero<$target> { + #[rustc_const_unstable(feature = "const_try", issue = "74935")] + impl const TryFrom> for NonZero<$target> { type Error = TryFromIntError; // Rustdocs on the impl block show a "[+] show undocumented items" toggle. diff --git a/library/core/src/iter/adapters/rev.rs b/library/core/src/iter/adapters/rev.rs index 06ab15d5e900d..17d3eef597dcb 100644 --- a/library/core/src/iter/adapters/rev.rs +++ b/library/core/src/iter/adapters/rev.rs @@ -20,6 +20,25 @@ impl Rev { pub(in crate::iter) fn new(iter: T) -> Rev { Rev { iter } } + + /// Consumes the `Rev`, returning the inner iterator. + /// + /// # Examples + /// + /// ```rust + /// #![feature(rev_into_inner)] + /// + /// let s = "foobar"; + /// let mut rev = s.chars().rev(); + /// assert_eq!(rev.next(), Some('r')); + /// assert_eq!(rev.next(), Some('a')); + /// assert_eq!(rev.next(), Some('b')); + /// assert_eq!(rev.into_inner().collect::(), "foo"); + /// ``` + #[unstable(feature = "rev_into_inner", issue = "144277")] + pub fn into_inner(self) -> T { + self.iter + } } #[stable(feature = "rust1", since = "1.0.0")] diff --git a/library/core/src/macros/mod.rs b/library/core/src/macros/mod.rs index 8ac6ce2242d4e..3d57da63683b2 100644 --- a/library/core/src/macros/mod.rs +++ b/library/core/src/macros/mod.rs @@ -426,8 +426,10 @@ pub macro debug_assert_matches($($arg:tt)*) { #[macro_export] #[stable(feature = "matches_macro", since = "1.42.0")] #[rustc_diagnostic_item = "matches_macro"] +#[allow_internal_unstable(non_exhaustive_omitted_patterns_lint, stmt_expr_attributes)] macro_rules! matches { ($expression:expr, $pattern:pat $(if $guard:expr)? $(,)?) => { + #[allow(non_exhaustive_omitted_patterns)] match $expression { $pattern $(if $guard)? => true, _ => false diff --git a/library/core/src/mem/drop_guard.rs b/library/core/src/mem/drop_guard.rs new file mode 100644 index 0000000000000..fecc94b815e97 --- /dev/null +++ b/library/core/src/mem/drop_guard.rs @@ -0,0 +1,155 @@ +use crate::fmt::{self, Debug}; +use crate::mem::ManuallyDrop; +use crate::ops::{Deref, DerefMut}; + +/// Wrap a value and run a closure when dropped. +/// +/// This is useful for quickly creating destructors inline. +/// +/// # Examples +/// +/// ```rust +/// # #![allow(unused)] +/// #![feature(drop_guard)] +/// +/// use std::mem::DropGuard; +/// +/// { +/// // Create a new guard around a string that will +/// // print its value when dropped. +/// let s = String::from("Chashu likes tuna"); +/// let mut s = DropGuard::new(s, |s| println!("{s}")); +/// +/// // Modify the string contained in the guard. +/// s.push_str("!!!"); +/// +/// // The guard will be dropped here, printing: +/// // "Chashu likes tuna!!!" +/// } +/// ``` +#[unstable(feature = "drop_guard", issue = "144426")] +#[doc(alias = "ScopeGuard")] +#[doc(alias = "defer")] +pub struct DropGuard +where + F: FnOnce(T), +{ + inner: ManuallyDrop, + f: ManuallyDrop, +} + +impl DropGuard +where + F: FnOnce(T), +{ + /// Create a new instance of `DropGuard`. 
+ /// + /// # Example + /// + /// ```rust + /// # #![allow(unused)] + /// #![feature(drop_guard)] + /// + /// use std::mem::DropGuard; + /// + /// let value = String::from("Chashu likes tuna"); + /// let guard = DropGuard::new(value, |s| println!("{s}")); + /// ``` + #[unstable(feature = "drop_guard", issue = "144426")] + #[must_use] + pub const fn new(inner: T, f: F) -> Self { + Self { inner: ManuallyDrop::new(inner), f: ManuallyDrop::new(f) } + } + + /// Consumes the `DropGuard`, returning the wrapped value. + /// + /// This will not execute the closure. This is implemented as an associated + /// function to prevent any potential conflicts with any other methods called + /// `into_inner` from the `Deref` and `DerefMut` impls. + /// + /// It is typically preferred to call this function instead of `mem::forget` + /// because it will return the stored value and drop variables captured + /// by the closure instead of leaking their owned resources. + /// + /// # Example + /// + /// ```rust + /// # #![allow(unused)] + /// #![feature(drop_guard)] + /// + /// use std::mem::DropGuard; + /// + /// let value = String::from("Nori likes chicken"); + /// let guard = DropGuard::new(value, |s| println!("{s}")); + /// assert_eq!(DropGuard::into_inner(guard), "Nori likes chicken"); + /// ``` + #[unstable(feature = "drop_guard", issue = "144426")] + #[inline] + pub fn into_inner(guard: Self) -> T { + // First we ensure that dropping the guard will not trigger + // its destructor + let mut guard = ManuallyDrop::new(guard); + + // Next we manually read the stored value from the guard. + // + // SAFETY: this is safe because we've taken ownership of the guard. + let value = unsafe { ManuallyDrop::take(&mut guard.inner) }; + + // Finally we drop the stored closure. We do this *after* having read + // the value, so that even if the closure's `drop` function panics, + // unwinding still tries to drop the value. + // + // SAFETY: this is safe because we've taken ownership of the guard. + unsafe { ManuallyDrop::drop(&mut guard.f) }; + value + } +} + +#[unstable(feature = "drop_guard", issue = "144426")] +impl Deref for DropGuard +where + F: FnOnce(T), +{ + type Target = T; + + fn deref(&self) -> &T { + &*self.inner + } +} + +#[unstable(feature = "drop_guard", issue = "144426")] +impl DerefMut for DropGuard +where + F: FnOnce(T), +{ + fn deref_mut(&mut self) -> &mut T { + &mut *self.inner + } +} + +#[unstable(feature = "drop_guard", issue = "144426")] +impl Drop for DropGuard +where + F: FnOnce(T), +{ + fn drop(&mut self) { + // SAFETY: `DropGuard` is in the process of being dropped. + let inner = unsafe { ManuallyDrop::take(&mut self.inner) }; + + // SAFETY: `DropGuard` is in the process of being dropped. 
+ let f = unsafe { ManuallyDrop::take(&mut self.f) }; + + f(inner); + } +} + +#[unstable(feature = "drop_guard", issue = "144426")] +impl Debug for DropGuard +where + T: Debug, + F: FnOnce(T), +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&**self, f) + } +} diff --git a/library/core/src/mem/mod.rs b/library/core/src/mem/mod.rs index 12113c227131c..fb22fdb43d943 100644 --- a/library/core/src/mem/mod.rs +++ b/library/core/src/mem/mod.rs @@ -23,6 +23,10 @@ mod transmutability; #[unstable(feature = "transmutability", issue = "99571")] pub use transmutability::{Assume, TransmuteFrom}; +mod drop_guard; +#[unstable(feature = "drop_guard", issue = "144426")] +pub use drop_guard::DropGuard; + // This one has to be a re-export (rather than wrapping the underlying intrinsic) so that we can do // the special magic "types have equal size" check at the call site. #[stable(feature = "rust1", since = "1.0.0")] @@ -964,7 +968,7 @@ pub fn drop(_x: T) {} /// /// This function is not magic; it is literally defined as /// ``` -/// pub fn copy(x: &T) -> T { *x } +/// pub const fn copy(x: &T) -> T { *x } /// ``` /// /// It is useful when you want to pass a function pointer to a combinator, rather than defining a new closure. diff --git a/library/core/src/net/ip_addr.rs b/library/core/src/net/ip_addr.rs index 49a7ae5de5c8f..6adeb2aa3fdad 100644 --- a/library/core/src/net/ip_addr.rs +++ b/library/core/src/net/ip_addr.rs @@ -1088,7 +1088,8 @@ impl fmt::Debug for IpAddr { } #[stable(feature = "ip_from_ip", since = "1.16.0")] -impl From for IpAddr { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const From for IpAddr { /// Copies this address to a new `IpAddr::V4`. /// /// # Examples @@ -1110,7 +1111,8 @@ impl From for IpAddr { } #[stable(feature = "ip_from_ip", since = "1.16.0")] -impl From for IpAddr { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const From for IpAddr { /// Copies this address to a new `IpAddr::V6`. /// /// # Examples @@ -1220,7 +1222,8 @@ impl Ord for Ipv4Addr { } #[stable(feature = "ip_u32", since = "1.1.0")] -impl From for u32 { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const From for u32 { /// Uses [`Ipv4Addr::to_bits`] to convert an IPv4 address to a host byte order `u32`. #[inline] fn from(ip: Ipv4Addr) -> u32 { @@ -1229,7 +1232,8 @@ impl From for u32 { } #[stable(feature = "ip_u32", since = "1.1.0")] -impl From for Ipv4Addr { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const From for Ipv4Addr { /// Uses [`Ipv4Addr::from_bits`] to convert a host byte order `u32` into an IPv4 address. #[inline] fn from(ip: u32) -> Ipv4Addr { @@ -1238,7 +1242,8 @@ impl From for Ipv4Addr { } #[stable(feature = "from_slice_v4", since = "1.9.0")] -impl From<[u8; 4]> for Ipv4Addr { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const From<[u8; 4]> for Ipv4Addr { /// Creates an `Ipv4Addr` from a four element byte array. /// /// # Examples @@ -1256,7 +1261,8 @@ impl From<[u8; 4]> for Ipv4Addr { } #[stable(feature = "ip_from_slice", since = "1.17.0")] -impl From<[u8; 4]> for IpAddr { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const From<[u8; 4]> for IpAddr { /// Creates an `IpAddr::V4` from a four element byte array. 
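The new `DropGuard` above is essentially a built-in scope guard. A short usage sketch (requires nightly with `#![feature(drop_guard)]`, as in the doc examples): the closure runs on every exit path unless the guard is dismantled with `into_inner`.

```rust
#![feature(drop_guard)]

use std::mem::DropGuard;

// Count lines until the first blank one. On early return the guard is
// dropped and the closure reports the count; on the normal path
// `DropGuard::into_inner` returns the value without running the closure.
fn count_until_blank(lines: &[&str]) -> usize {
    let mut counter = DropGuard::new(0usize, |n| println!("processed {n} lines"));
    for line in lines {
        if line.is_empty() {
            return *counter; // guard drops here and prints
        }
        *counter += 1;
    }
    DropGuard::into_inner(counter) // skip the closure on normal exit
}

fn main() {
    assert_eq!(count_until_blank(&["a", "b", "", "c"]), 2);
    assert_eq!(count_until_blank(&["a", "b"]), 2);
}
```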
/// /// # Examples @@ -2210,7 +2216,8 @@ impl Ord for Ipv6Addr { } #[stable(feature = "i128", since = "1.26.0")] -impl From for u128 { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const From for u128 { /// Uses [`Ipv6Addr::to_bits`] to convert an IPv6 address to a host byte order `u128`. #[inline] fn from(ip: Ipv6Addr) -> u128 { @@ -2218,7 +2225,8 @@ impl From for u128 { } } #[stable(feature = "i128", since = "1.26.0")] -impl From for Ipv6Addr { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const From for Ipv6Addr { /// Uses [`Ipv6Addr::from_bits`] to convert a host byte order `u128` to an IPv6 address. #[inline] fn from(ip: u128) -> Ipv6Addr { @@ -2227,7 +2235,8 @@ impl From for Ipv6Addr { } #[stable(feature = "ipv6_from_octets", since = "1.9.0")] -impl From<[u8; 16]> for Ipv6Addr { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const From<[u8; 16]> for Ipv6Addr { /// Creates an `Ipv6Addr` from a sixteen element byte array. /// /// # Examples @@ -2254,7 +2263,8 @@ impl From<[u8; 16]> for Ipv6Addr { } #[stable(feature = "ipv6_from_segments", since = "1.16.0")] -impl From<[u16; 8]> for Ipv6Addr { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const From<[u16; 8]> for Ipv6Addr { /// Creates an `Ipv6Addr` from an eight element 16-bit array. /// /// # Examples @@ -2282,7 +2292,8 @@ impl From<[u16; 8]> for Ipv6Addr { } #[stable(feature = "ip_from_slice", since = "1.17.0")] -impl From<[u8; 16]> for IpAddr { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const From<[u8; 16]> for IpAddr { /// Creates an `IpAddr::V6` from a sixteen element byte array. /// /// # Examples @@ -2309,7 +2320,8 @@ impl From<[u8; 16]> for IpAddr { } #[stable(feature = "ip_from_slice", since = "1.17.0")] -impl From<[u16; 8]> for IpAddr { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const From<[u16; 8]> for IpAddr { /// Creates an `IpAddr::V6` from an eight element 16-bit array. /// /// # Examples diff --git a/library/core/src/net/socket_addr.rs b/library/core/src/net/socket_addr.rs index 936f9f64930d5..69924199f99f1 100644 --- a/library/core/src/net/socket_addr.rs +++ b/library/core/src/net/socket_addr.rs @@ -592,7 +592,8 @@ impl SocketAddrV6 { } #[stable(feature = "ip_from_ip", since = "1.16.0")] -impl From for SocketAddr { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const From for SocketAddr { /// Converts a [`SocketAddrV4`] into a [`SocketAddr::V4`]. #[inline] fn from(sock4: SocketAddrV4) -> SocketAddr { @@ -601,7 +602,8 @@ impl From for SocketAddr { } #[stable(feature = "ip_from_ip", since = "1.16.0")] -impl From for SocketAddr { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const From for SocketAddr { /// Converts a [`SocketAddrV6`] into a [`SocketAddr::V6`]. #[inline] fn from(sock6: SocketAddrV6) -> SocketAddr { @@ -610,7 +612,8 @@ impl From for SocketAddr { } #[stable(feature = "addr_from_into_ip", since = "1.17.0")] -impl> From<(I, u16)> for SocketAddr { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl> const From<(I, u16)> for SocketAddr { /// Converts a tuple struct (Into<[`IpAddr`]>, `u16`) into a [`SocketAddr`]. 
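As with the other constified `From` impls, the address conversions above keep their existing semantics. A brief stable example of the conversions involved:

```rust
use std::net::{IpAddr, Ipv4Addr, SocketAddr};

fn main() {
    // From<[u8; 4]> for Ipv4Addr, and From<Ipv4Addr> for u32 (host byte order).
    let ip = Ipv4Addr::from([127, 0, 0, 1]);
    assert_eq!(u32::from(ip), 0x7f00_0001);

    // From<(I, u16)> for SocketAddr, where I: Into<IpAddr>.
    let addr = SocketAddr::from((ip, 8080));
    let parsed: SocketAddr = "127.0.0.1:8080".parse().unwrap();
    assert_eq!(addr, parsed);
    assert_eq!(addr.ip(), IpAddr::V4(ip));
}
```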
/// /// This conversion creates a [`SocketAddr::V4`] for an [`IpAddr::V4`] diff --git a/library/core/src/num/error.rs b/library/core/src/num/error.rs index f9c4cdd0ebe63..cfedd465cab0b 100644 --- a/library/core/src/num/error.rs +++ b/library/core/src/num/error.rs @@ -26,14 +26,16 @@ impl Error for TryFromIntError { } #[stable(feature = "try_from", since = "1.34.0")] -impl From for TryFromIntError { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const From for TryFromIntError { fn from(x: Infallible) -> TryFromIntError { match x {} } } #[unstable(feature = "never_type", issue = "35121")] -impl From for TryFromIntError { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const From for TryFromIntError { #[inline] fn from(never: !) -> TryFromIntError { // Match rather than coerce to make sure that code like diff --git a/library/core/src/num/flt2dec/mod.rs b/library/core/src/num/flt2dec/mod.rs index 7601e3e2c58a2..e79a00a865969 100644 --- a/library/core/src/num/flt2dec/mod.rs +++ b/library/core/src/num/flt2dec/mod.rs @@ -150,23 +150,19 @@ pub fn round_up(d: &mut [u8]) -> Option { Some(i) => { // d[i+1..n] is all nines d[i] += 1; - for j in i + 1..d.len() { - d[j] = b'0'; - } + d[i + 1..].fill(b'0'); None } - None if d.len() > 0 => { + None if d.is_empty() => { + // an empty buffer rounds up (a bit strange but reasonable) + Some(b'1') + } + None => { // 999..999 rounds to 1000..000 with an increased exponent d[0] = b'1'; - for j in 1..d.len() { - d[j] = b'0'; - } + d[1..].fill(b'0'); Some(b'0') } - None => { - // an empty buffer rounds up (a bit strange but reasonable) - Some(b'1') - } } } diff --git a/library/core/src/num/mod.rs b/library/core/src/num/mod.rs index cf05666698531..d1ecbbbbb8247 100644 --- a/library/core/src/num/mod.rs +++ b/library/core/src/num/mod.rs @@ -1382,7 +1382,8 @@ const fn from_ascii_radix_panic(radix: u32) -> ! { macro_rules! from_str_int_impl { ($signedness:ident $($int_ty:ty)+) => {$( #[stable(feature = "rust1", since = "1.0.0")] - impl FromStr for $int_ty { + #[rustc_const_unstable(feature = "const_try", issue = "74935")] + impl const FromStr for $int_ty { type Err = ParseIntError; /// Parses an integer from a string slice with decimal digits. 
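The `round_up` rewrite above replaces two manual fill loops with `slice::fill` and reorders the `None` arms so the empty-buffer case is handled first. A standalone sketch of the same rounding rule over an ASCII digit buffer (a free function written here for illustration; the real one is private to `flt2dec`):

```rust
// Round an ASCII decimal digit buffer up by one unit in the last place.
// Returns Some(..) only when the buffer overflows (all nines, or empty),
// in which case the caller is expected to bump the exponent.
fn round_up(d: &mut [u8]) -> Option<u8> {
    match d.iter().rposition(|&c| c != b'9') {
        Some(i) => {
            // d[i + 1..] is all nines: bump d[i] and zero the tail.
            d[i] += 1;
            d[i + 1..].fill(b'0');
            None
        }
        None if d.is_empty() => Some(b'1'),
        None => {
            // 999..999 rounds to 1000..000 with an increased exponent.
            d[0] = b'1';
            d[1..].fill(b'0');
            Some(b'0')
        }
    }
}

fn main() {
    let mut a = *b"1299";
    assert_eq!(round_up(&mut a), None);
    assert_eq!(&a, b"1300");

    let mut b = *b"999";
    assert_eq!(round_up(&mut b), Some(b'0'));
    assert_eq!(&b, b"100");
}
```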
diff --git a/library/core/src/num/nonzero.rs b/library/core/src/num/nonzero.rs index 56a4ac621b6ea..9720ade98e04e 100644 --- a/library/core/src/num/nonzero.rs +++ b/library/core/src/num/nonzero.rs @@ -301,7 +301,8 @@ where } #[stable(feature = "from_nonzero", since = "1.31.0")] -impl From> for T +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const From> for T where T: ZeroablePrimitive, { diff --git a/library/core/src/ops/deref.rs b/library/core/src/ops/deref.rs index 9d9d18095bc64..c2dede9fa0889 100644 --- a/library/core/src/ops/deref.rs +++ b/library/core/src/ops/deref.rs @@ -158,7 +158,7 @@ impl const Deref for &T { #[rustc_diagnostic_item = "noop_method_deref"] fn deref(&self) -> &T { - *self + self } } @@ -171,7 +171,7 @@ impl const Deref for &mut T { type Target = T; fn deref(&self) -> &T { - *self + self } } @@ -280,7 +280,7 @@ pub trait DerefMut: ~const Deref + PointeeSized { #[rustc_const_unstable(feature = "const_deref", issue = "88955")] impl const DerefMut for &mut T { fn deref_mut(&mut self) -> &mut T { - *self + self } } diff --git a/library/core/src/ops/range.rs b/library/core/src/ops/range.rs index ad3b6439a6105..f33a33e6b752f 100644 --- a/library/core/src/ops/range.rs +++ b/library/core/src/ops/range.rs @@ -1141,6 +1141,12 @@ impl<'a, T: ?Sized + 'a> RangeBounds for (Bound<&'a T>, Bound<&'a T>) { } } +// This impl intentionally does not have `T: ?Sized`; +// see https://github.com/rust-lang/rust/pull/61584 for discussion of why. +// +/// If you need to use this implementation where `T` is unsized, +/// consider using the `RangeBounds` impl for a 2-tuple of [`Bound<&T>`][Bound], +/// i.e. replace `start..` with `(Bound::Included(start), Bound::Unbounded)`. #[stable(feature = "collections_range", since = "1.28.0")] impl RangeBounds for RangeFrom<&T> { fn start_bound(&self) -> Bound<&T> { @@ -1151,6 +1157,12 @@ impl RangeBounds for RangeFrom<&T> { } } +// This impl intentionally does not have `T: ?Sized`; +// see https://github.com/rust-lang/rust/pull/61584 for discussion of why. +// +/// If you need to use this implementation where `T` is unsized, +/// consider using the `RangeBounds` impl for a 2-tuple of [`Bound<&T>`][Bound], +/// i.e. replace `..end` with `(Bound::Unbounded, Bound::Excluded(end))`. #[stable(feature = "collections_range", since = "1.28.0")] impl RangeBounds for RangeTo<&T> { fn start_bound(&self) -> Bound<&T> { @@ -1161,6 +1173,12 @@ impl RangeBounds for RangeTo<&T> { } } +// This impl intentionally does not have `T: ?Sized`; +// see https://github.com/rust-lang/rust/pull/61584 for discussion of why. +// +/// If you need to use this implementation where `T` is unsized, +/// consider using the `RangeBounds` impl for a 2-tuple of [`Bound<&T>`][Bound], +/// i.e. replace `start..end` with `(Bound::Included(start), Bound::Excluded(end))`. #[stable(feature = "collections_range", since = "1.28.0")] impl RangeBounds for Range<&T> { fn start_bound(&self) -> Bound<&T> { @@ -1171,6 +1189,12 @@ impl RangeBounds for Range<&T> { } } +// This impl intentionally does not have `T: ?Sized`; +// see https://github.com/rust-lang/rust/pull/61584 for discussion of why. +// +/// If you need to use this implementation where `T` is unsized, +/// consider using the `RangeBounds` impl for a 2-tuple of [`Bound<&T>`][Bound], +/// i.e. replace `start..=end` with `(Bound::Included(start), Bound::Included(end))`. 
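The new comments on the `RangeBounds` impls above explain why they require `T: Sized` and point at the tuple-of-`Bound` form as the workaround for unsized `T`. A small illustration with `str` keys in a `BTreeMap` (a usage sketch, not part of the diff):

```rust
use std::collections::BTreeMap;
use std::ops::Bound;

fn main() {
    let mut map = BTreeMap::new();
    map.insert("apple".to_string(), 1);
    map.insert("banana".to_string(), 2);
    map.insert("cherry".to_string(), 3);

    // `"apple".."cherry"` is a Range<&str>, and RangeBounds<str> for Range<&T>
    // needs T: Sized, so it cannot range over String keys borrowed as str.
    // The 2-tuple of Bound<&str> implements RangeBounds<str> for unsized str.
    let bounds = (Bound::Included("apple"), Bound::Excluded("cherry"));
    let keys: Vec<_> = map
        .range::<str, _>(bounds)
        .map(|(k, _)| k.as_str())
        .collect();
    assert_eq!(keys, ["apple", "banana"]);
}
```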
#[stable(feature = "collections_range", since = "1.28.0")] impl RangeBounds for RangeInclusive<&T> { fn start_bound(&self) -> Bound<&T> { @@ -1181,6 +1205,12 @@ impl RangeBounds for RangeInclusive<&T> { } } +// This impl intentionally does not have `T: ?Sized`; +// see https://github.com/rust-lang/rust/pull/61584 for discussion of why. +// +/// If you need to use this implementation where `T` is unsized, +/// consider using the `RangeBounds` impl for a 2-tuple of [`Bound<&T>`][Bound], +/// i.e. replace `..=end` with `(Bound::Unbounded, Bound::Included(end))`. #[stable(feature = "collections_range", since = "1.28.0")] impl RangeBounds for RangeToInclusive<&T> { fn start_bound(&self) -> Bound<&T> { diff --git a/library/core/src/ops/try_trait.rs b/library/core/src/ops/try_trait.rs index aebbddb4f1c07..a889c824be53d 100644 --- a/library/core/src/ops/try_trait.rs +++ b/library/core/src/ops/try_trait.rs @@ -128,7 +128,9 @@ use crate::ops::ControlFlow; )] #[doc(alias = "?")] #[lang = "Try"] -pub trait Try: FromResidual { +#[const_trait] +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +pub trait Try: ~const FromResidual { /// The type of the value produced by `?` when *not* short-circuiting. #[unstable(feature = "try_trait_v2", issue = "84277", old_name = "try_trait")] type Output; @@ -304,6 +306,8 @@ pub trait Try: FromResidual { )] #[rustc_diagnostic_item = "FromResidual"] #[unstable(feature = "try_trait_v2", issue = "84277", old_name = "try_trait")] +#[const_trait] +#[rustc_const_unstable(feature = "const_try", issue = "74935")] pub trait FromResidual::Residual> { /// Constructs the type from a compatible `Residual` type. /// @@ -357,6 +361,8 @@ where /// and in the other direction, /// ` as Residual>::TryType = Result`. #[unstable(feature = "try_trait_v2_residual", issue = "91285")] +#[const_trait] +#[rustc_const_unstable(feature = "const_try", issue = "74935")] pub trait Residual { /// The "return" type of this meta-function. #[unstable(feature = "try_trait_v2_residual", issue = "91285")] diff --git a/library/core/src/option.rs b/library/core/src/option.rs index d2365749f6129..4ec0a1cb91f44 100644 --- a/library/core/src/option.rs +++ b/library/core/src/option.rs @@ -842,7 +842,7 @@ impl Option { // just needs to be aligned, which it is because `&self` is aligned and // the offset used is a multiple of alignment. // - // In the new version, the intrinsic always returns a pointer to an + // Here we assume that `offset_of!` always returns an offset to an // in-bounds and correctly aligned position for a `T` (even if in the // `None` case it's just padding). unsafe { @@ -2144,9 +2144,12 @@ const fn expect_failed(msg: &str) -> ! { ///////////////////////////////////////////////////////////////////////////// #[stable(feature = "rust1", since = "1.0.0")] -impl Clone for Option +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const Clone for Option where - T: Clone, + // FIXME(const_hack): the T: ~const Destruct should be inferred from the Self: ~const Destruct in clone_from. + // See https://github.com/rust-lang/rust/issues/144207 + T: ~const Clone + ~const Destruct, { #[inline] fn clone(&self) -> Self { @@ -2230,7 +2233,8 @@ impl<'a, T> IntoIterator for &'a mut Option { } #[stable(since = "1.12.0", feature = "option_from")] -impl From for Option { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const From for Option { /// Moves `val` into a new [`Some`]. 
/// /// # Examples @@ -2246,7 +2250,8 @@ impl From for Option { } #[stable(feature = "option_ref_from_ref_option", since = "1.30.0")] -impl<'a, T> From<&'a Option> for Option<&'a T> { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl<'a, T> const From<&'a Option> for Option<&'a T> { /// Converts from `&Option` to `Option<&T>`. /// /// # Examples @@ -2273,7 +2278,8 @@ impl<'a, T> From<&'a Option> for Option<&'a T> { } #[stable(feature = "option_ref_from_ref_option", since = "1.30.0")] -impl<'a, T> From<&'a mut Option> for Option<&'a mut T> { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl<'a, T> const From<&'a mut Option> for Option<&'a mut T> { /// Converts from `&mut Option` to `Option<&mut T>` /// /// # Examples @@ -2593,7 +2599,8 @@ impl> FromIterator> for Option { } #[unstable(feature = "try_trait_v2", issue = "84277", old_name = "try_trait")] -impl ops::Try for Option { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const ops::Try for Option { type Output = T; type Residual = Option; @@ -2612,9 +2619,10 @@ impl ops::Try for Option { } #[unstable(feature = "try_trait_v2", issue = "84277", old_name = "try_trait")] +#[rustc_const_unstable(feature = "const_try", issue = "74935")] // Note: manually specifying the residual type instead of using the default to work around // https://github.com/rust-lang/rust/issues/99940 -impl ops::FromResidual> for Option { +impl const ops::FromResidual> for Option { #[inline] fn from_residual(residual: Option) -> Self { match residual { @@ -2625,7 +2633,8 @@ impl ops::FromResidual> for Option { #[diagnostic::do_not_recommend] #[unstable(feature = "try_trait_v2_yeet", issue = "96374")] -impl ops::FromResidual> for Option { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const ops::FromResidual> for Option { #[inline] fn from_residual(ops::Yeet(()): ops::Yeet<()>) -> Self { None @@ -2633,7 +2642,8 @@ impl ops::FromResidual> for Option { } #[unstable(feature = "try_trait_v2_residual", issue = "91285")] -impl ops::Residual for Option { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const ops::Residual for Option { type TryType = Option; } diff --git a/library/core/src/ptr/alignment.rs b/library/core/src/ptr/alignment.rs index b658bbb24eddd..99bf323ec572c 100644 --- a/library/core/src/ptr/alignment.rs +++ b/library/core/src/ptr/alignment.rs @@ -214,7 +214,8 @@ impl TryFrom for Alignment { } #[unstable(feature = "ptr_alignment_type", issue = "102070")] -impl From for NonZero { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const From for NonZero { #[inline] fn from(align: Alignment) -> NonZero { align.as_nonzero() @@ -222,7 +223,8 @@ impl From for NonZero { } #[unstable(feature = "ptr_alignment_type", issue = "102070")] -impl From for usize { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const From for usize { #[inline] fn from(align: Alignment) -> usize { align.as_usize() diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs index 6e537d6c15079..5e1152835056d 100644 --- a/library/core/src/ptr/mod.rs +++ b/library/core/src/ptr/mod.rs @@ -976,9 +976,10 @@ pub const fn dangling_mut() -> *mut T { #[must_use] #[inline(always)] #[stable(feature = "exposed_provenance", since = "1.84.0")] +#[rustc_const_unstable(feature = "const_exposed_provenance", issue = "144538")] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces #[allow(fuzzy_provenance_casts)] // 
this *is* the explicit provenance API one should use instead -pub fn with_exposed_provenance(addr: usize) -> *const T { +pub const fn with_exposed_provenance(addr: usize) -> *const T { addr as *const T } @@ -1016,9 +1017,10 @@ pub fn with_exposed_provenance(addr: usize) -> *const T { #[must_use] #[inline(always)] #[stable(feature = "exposed_provenance", since = "1.84.0")] +#[rustc_const_unstable(feature = "const_exposed_provenance", issue = "144538")] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces #[allow(fuzzy_provenance_casts)] // this *is* the explicit provenance API one should use instead -pub fn with_exposed_provenance_mut(addr: usize) -> *mut T { +pub const fn with_exposed_provenance_mut(addr: usize) -> *mut T { addr as *mut T } diff --git a/library/core/src/ptr/unique.rs b/library/core/src/ptr/unique.rs index 006b7e62ae95d..19fdb25d81375 100644 --- a/library/core/src/ptr/unique.rs +++ b/library/core/src/ptr/unique.rs @@ -36,8 +36,6 @@ use crate::ptr::NonNull; )] #[doc(hidden)] #[repr(transparent)] -// Lang item used experimentally by Miri to define the semantics of `Unique`. -#[lang = "ptr_unique"] pub struct Unique { pointer: NonNull, // NOTE: this marker has no consequences for variance, but is necessary diff --git a/library/core/src/range.rs b/library/core/src/range.rs index 5cd7956291ca2..7158fa0fcf069 100644 --- a/library/core/src/range.rs +++ b/library/core/src/range.rs @@ -167,6 +167,12 @@ impl RangeBounds for Range { } } +// This impl intentionally does not have `T: ?Sized`; +// see https://github.com/rust-lang/rust/pull/61584 for discussion of why. +// +/// If you need to use this implementation where `T` is unsized, +/// consider using the `RangeBounds` impl for a 2-tuple of [`Bound<&T>`][Bound], +/// i.e. replace `start..end` with `(Bound::Included(start), Bound::Excluded(end))`. #[unstable(feature = "new_range_api", issue = "125687")] impl RangeBounds for Range<&T> { fn start_bound(&self) -> Bound<&T> { @@ -346,6 +352,12 @@ impl RangeBounds for RangeInclusive { } } +// This impl intentionally does not have `T: ?Sized`; +// see https://github.com/rust-lang/rust/pull/61584 for discussion of why. +// +/// If you need to use this implementation where `T` is unsized, +/// consider using the `RangeBounds` impl for a 2-tuple of [`Bound<&T>`][Bound], +/// i.e. replace `start..=end` with `(Bound::Included(start), Bound::Included(end))`. #[unstable(feature = "new_range_api", issue = "125687")] impl RangeBounds for RangeInclusive<&T> { fn start_bound(&self) -> Bound<&T> { @@ -491,6 +503,12 @@ impl RangeBounds for RangeFrom { } } +// This impl intentionally does not have `T: ?Sized`; +// see https://github.com/rust-lang/rust/pull/61584 for discussion of why. +// +/// If you need to use this implementation where `T` is unsized, +/// consider using the `RangeBounds` impl for a 2-tuple of [`Bound<&T>`][Bound], +/// i.e. replace `start..` with `(Bound::Included(start), Bound::Unbounded)`. 
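The tuple-of-`Bound` workaround recommended by these new doc comments can be illustrated with the long-stable `core::ops` types; the following is a rough, editor-added sketch (it uses `BTreeMap::range` and `ops::Bound`, which are not part of this patch, rather than the unstable `core::range` API):

```
use std::collections::BTreeMap;
use std::ops::Bound;

fn main() {
    let mut scores: BTreeMap<String, u32> = BTreeMap::new();
    scores.insert("apple".to_string(), 1);
    scores.insert("banana".to_string(), 2);

    // `RangeBounds<T>` for `RangeFrom<&T>` requires `T: Sized`, so a lookup
    // keyed by borrowed, unsized `str` cannot be written as `"banana"..`.
    // The impl for a 2-tuple of `Bound<&T>` has no such restriction:
    let from_banana: Vec<&str> = scores
        .range::<str, _>((Bound::Included("banana"), Bound::Unbounded))
        .map(|(k, _)| k.as_str())
        .collect();
    assert_eq!(from_banana, ["banana"]);
}
```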
#[unstable(feature = "new_range_api", issue = "125687")] impl RangeBounds for RangeFrom<&T> { fn start_bound(&self) -> Bound<&T> { diff --git a/library/core/src/result.rs b/library/core/src/result.rs index 7f3f296498544..f65257ff59b92 100644 --- a/library/core/src/result.rs +++ b/library/core/src/result.rs @@ -1288,9 +1288,11 @@ impl Result { /// ``` #[unstable(feature = "unwrap_infallible", reason = "newly added", issue = "61695")] #[inline] - pub fn into_ok(self) -> T + #[rustc_allow_const_fn_unstable(const_precise_live_drops)] + #[rustc_const_unstable(feature = "const_try", issue = "74935")] + pub const fn into_ok(self) -> T where - E: Into, + E: ~const Into, { match self { Ok(x) => x, @@ -1323,9 +1325,11 @@ impl Result { /// ``` #[unstable(feature = "unwrap_infallible", reason = "newly added", issue = "61695")] #[inline] - pub fn into_err(self) -> E + #[rustc_allow_const_fn_unstable(const_precise_live_drops)] + #[rustc_const_unstable(feature = "const_try", issue = "74935")] + pub const fn into_err(self) -> E where - T: Into, + T: ~const Into, { match self { Ok(x) => x.into(), @@ -2052,7 +2056,8 @@ impl> FromIterator> for Result { } #[unstable(feature = "try_trait_v2", issue = "84277", old_name = "try_trait")] -impl ops::Try for Result { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const ops::Try for Result { type Output = T; type Residual = Result; @@ -2071,7 +2076,10 @@ impl ops::Try for Result { } #[unstable(feature = "try_trait_v2", issue = "84277", old_name = "try_trait")] -impl> ops::FromResidual> for Result { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl> const ops::FromResidual> + for Result +{ #[inline] #[track_caller] fn from_residual(residual: Result) -> Self { @@ -2082,7 +2090,8 @@ impl> ops::FromResidual> for Res } #[diagnostic::do_not_recommend] #[unstable(feature = "try_trait_v2_yeet", issue = "96374")] -impl> ops::FromResidual> for Result { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl> const ops::FromResidual> for Result { #[inline] fn from_residual(ops::Yeet(e): ops::Yeet) -> Self { Err(From::from(e)) @@ -2090,6 +2099,7 @@ impl> ops::FromResidual> for Result { } #[unstable(feature = "try_trait_v2_residual", issue = "91285")] -impl ops::Residual for Result { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const ops::Residual for Result { type TryType = Result; } diff --git a/library/core/src/slice/iter.rs b/library/core/src/slice/iter.rs index 421959a74e8ec..22b78418d67ee 100644 --- a/library/core/src/slice/iter.rs +++ b/library/core/src/slice/iter.rs @@ -2353,255 +2353,6 @@ impl ExactSizeIterator for ArrayWindows<'_, T, N> { } } -/// An iterator over a slice in (non-overlapping) chunks (`N` elements at a -/// time), starting at the beginning of the slice. -/// -/// When the slice len is not evenly divided by the chunk size, the last -/// up to `N-1` elements will be omitted but can be retrieved from -/// the [`remainder`] function from the iterator. -/// -/// This struct is created by the [`array_chunks`] method on [slices]. 
-/// -/// # Example -/// -/// ``` -/// #![feature(array_chunks)] -/// -/// let slice = ['l', 'o', 'r', 'e', 'm']; -/// let mut iter = slice.array_chunks::<2>(); -/// assert_eq!(iter.next(), Some(&['l', 'o'])); -/// assert_eq!(iter.next(), Some(&['r', 'e'])); -/// assert_eq!(iter.next(), None); -/// ``` -/// -/// [`array_chunks`]: slice::array_chunks -/// [`remainder`]: ArrayChunks::remainder -/// [slices]: slice -#[derive(Debug)] -#[unstable(feature = "array_chunks", issue = "74985")] -#[must_use = "iterators are lazy and do nothing unless consumed"] -pub struct ArrayChunks<'a, T: 'a, const N: usize> { - iter: Iter<'a, [T; N]>, - rem: &'a [T], -} - -impl<'a, T, const N: usize> ArrayChunks<'a, T, N> { - #[rustc_const_unstable(feature = "const_slice_make_iter", issue = "137737")] - #[inline] - pub(super) const fn new(slice: &'a [T]) -> Self { - let (array_slice, rem) = slice.as_chunks(); - Self { iter: array_slice.iter(), rem } - } - - /// Returns the remainder of the original slice that is not going to be - /// returned by the iterator. The returned slice has at most `N-1` - /// elements. - #[must_use] - #[unstable(feature = "array_chunks", issue = "74985")] - pub fn remainder(&self) -> &'a [T] { - self.rem - } -} - -// FIXME(#26925) Remove in favor of `#[derive(Clone)]` -#[unstable(feature = "array_chunks", issue = "74985")] -impl Clone for ArrayChunks<'_, T, N> { - fn clone(&self) -> Self { - ArrayChunks { iter: self.iter.clone(), rem: self.rem } - } -} - -#[unstable(feature = "array_chunks", issue = "74985")] -impl<'a, T, const N: usize> Iterator for ArrayChunks<'a, T, N> { - type Item = &'a [T; N]; - - #[inline] - fn next(&mut self) -> Option<&'a [T; N]> { - self.iter.next() - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } - - #[inline] - fn count(self) -> usize { - self.iter.count() - } - - #[inline] - fn nth(&mut self, n: usize) -> Option { - self.iter.nth(n) - } - - #[inline] - fn last(self) -> Option { - self.iter.last() - } - - unsafe fn __iterator_get_unchecked(&mut self, i: usize) -> &'a [T; N] { - // SAFETY: The safety guarantees of `__iterator_get_unchecked` are - // transferred to the caller. - unsafe { self.iter.__iterator_get_unchecked(i) } - } -} - -#[unstable(feature = "array_chunks", issue = "74985")] -impl<'a, T, const N: usize> DoubleEndedIterator for ArrayChunks<'a, T, N> { - #[inline] - fn next_back(&mut self) -> Option<&'a [T; N]> { - self.iter.next_back() - } - - #[inline] - fn nth_back(&mut self, n: usize) -> Option { - self.iter.nth_back(n) - } -} - -#[unstable(feature = "array_chunks", issue = "74985")] -impl ExactSizeIterator for ArrayChunks<'_, T, N> { - fn is_empty(&self) -> bool { - self.iter.is_empty() - } -} - -#[unstable(feature = "trusted_len", issue = "37572")] -unsafe impl TrustedLen for ArrayChunks<'_, T, N> {} - -#[unstable(feature = "array_chunks", issue = "74985")] -impl FusedIterator for ArrayChunks<'_, T, N> {} - -#[doc(hidden)] -#[unstable(feature = "array_chunks", issue = "74985")] -unsafe impl<'a, T, const N: usize> TrustedRandomAccess for ArrayChunks<'a, T, N> {} - -#[doc(hidden)] -#[unstable(feature = "array_chunks", issue = "74985")] -unsafe impl<'a, T, const N: usize> TrustedRandomAccessNoCoerce for ArrayChunks<'a, T, N> { - const MAY_HAVE_SIDE_EFFECT: bool = false; -} - -/// An iterator over a slice in (non-overlapping) mutable chunks (`N` elements -/// at a time), starting at the beginning of the slice. 
-/// -/// When the slice len is not evenly divided by the chunk size, the last -/// up to `N-1` elements will be omitted but can be retrieved from -/// the [`into_remainder`] function from the iterator. -/// -/// This struct is created by the [`array_chunks_mut`] method on [slices]. -/// -/// # Example -/// -/// ``` -/// #![feature(array_chunks)] -/// -/// let mut slice = ['l', 'o', 'r', 'e', 'm']; -/// let iter = slice.array_chunks_mut::<2>(); -/// ``` -/// -/// [`array_chunks_mut`]: slice::array_chunks_mut -/// [`into_remainder`]: ../../std/slice/struct.ArrayChunksMut.html#method.into_remainder -/// [slices]: slice -#[derive(Debug)] -#[unstable(feature = "array_chunks", issue = "74985")] -#[must_use = "iterators are lazy and do nothing unless consumed"] -pub struct ArrayChunksMut<'a, T: 'a, const N: usize> { - iter: IterMut<'a, [T; N]>, - rem: &'a mut [T], -} - -impl<'a, T, const N: usize> ArrayChunksMut<'a, T, N> { - #[rustc_const_unstable(feature = "const_slice_make_iter", issue = "137737")] - #[inline] - pub(super) const fn new(slice: &'a mut [T]) -> Self { - let (array_slice, rem) = slice.as_chunks_mut(); - Self { iter: array_slice.iter_mut(), rem } - } - - /// Returns the remainder of the original slice that is not going to be - /// returned by the iterator. The returned slice has at most `N-1` - /// elements. - #[must_use = "`self` will be dropped if the result is not used"] - #[unstable(feature = "array_chunks", issue = "74985")] - pub fn into_remainder(self) -> &'a mut [T] { - self.rem - } -} - -#[unstable(feature = "array_chunks", issue = "74985")] -impl<'a, T, const N: usize> Iterator for ArrayChunksMut<'a, T, N> { - type Item = &'a mut [T; N]; - - #[inline] - fn next(&mut self) -> Option<&'a mut [T; N]> { - self.iter.next() - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } - - #[inline] - fn count(self) -> usize { - self.iter.count() - } - - #[inline] - fn nth(&mut self, n: usize) -> Option { - self.iter.nth(n) - } - - #[inline] - fn last(self) -> Option { - self.iter.last() - } - - unsafe fn __iterator_get_unchecked(&mut self, i: usize) -> &'a mut [T; N] { - // SAFETY: The safety guarantees of `__iterator_get_unchecked` are transferred to - // the caller. - unsafe { self.iter.__iterator_get_unchecked(i) } - } -} - -#[unstable(feature = "array_chunks", issue = "74985")] -impl<'a, T, const N: usize> DoubleEndedIterator for ArrayChunksMut<'a, T, N> { - #[inline] - fn next_back(&mut self) -> Option<&'a mut [T; N]> { - self.iter.next_back() - } - - #[inline] - fn nth_back(&mut self, n: usize) -> Option { - self.iter.nth_back(n) - } -} - -#[unstable(feature = "array_chunks", issue = "74985")] -impl ExactSizeIterator for ArrayChunksMut<'_, T, N> { - fn is_empty(&self) -> bool { - self.iter.is_empty() - } -} - -#[unstable(feature = "trusted_len", issue = "37572")] -unsafe impl TrustedLen for ArrayChunksMut<'_, T, N> {} - -#[unstable(feature = "array_chunks", issue = "74985")] -impl FusedIterator for ArrayChunksMut<'_, T, N> {} - -#[doc(hidden)] -#[unstable(feature = "array_chunks", issue = "74985")] -unsafe impl<'a, T, const N: usize> TrustedRandomAccess for ArrayChunksMut<'a, T, N> {} - -#[doc(hidden)] -#[unstable(feature = "array_chunks", issue = "74985")] -unsafe impl<'a, T, const N: usize> TrustedRandomAccessNoCoerce for ArrayChunksMut<'a, T, N> { - const MAY_HAVE_SIDE_EFFECT: bool = false; -} - /// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a /// time), starting at the end of the slice. 
/// diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs index 1a38ad5cd61fa..d3e78f011daa7 100644 --- a/library/core/src/slice/mod.rs +++ b/library/core/src/slice/mod.rs @@ -56,8 +56,6 @@ pub use index::SliceIndex; pub use index::{range, try_range}; #[unstable(feature = "array_windows", issue = "75027")] pub use iter::ArrayWindows; -#[unstable(feature = "array_chunks", issue = "74985")] -pub use iter::{ArrayChunks, ArrayChunksMut}; #[stable(feature = "slice_group_by", since = "1.77.0")] pub use iter::{ChunkBy, ChunkByMut}; #[stable(feature = "rust1", since = "1.0.0")] @@ -1236,7 +1234,7 @@ impl [T] { /// /// [`chunks`]: slice::chunks /// [`rchunks_exact`]: slice::rchunks_exact - /// [`as_chunks`]: slice::chunks + /// [`as_chunks`]: slice::as_chunks #[stable(feature = "chunks_exact", since = "1.31.0")] #[rustc_const_unstable(feature = "const_slice_make_iter", issue = "137737")] #[inline] @@ -1452,42 +1450,6 @@ impl [T] { (remainder, array_slice) } - /// Returns an iterator over `N` elements of the slice at a time, starting at the - /// beginning of the slice. - /// - /// The chunks are array references and do not overlap. If `N` does not divide the - /// length of the slice, then the last up to `N-1` elements will be omitted and can be - /// retrieved from the `remainder` function of the iterator. - /// - /// This method is the const generic equivalent of [`chunks_exact`]. - /// - /// # Panics - /// - /// Panics if `N` is zero. This check will most probably get changed to a compile time - /// error before this method gets stabilized. - /// - /// # Examples - /// - /// ``` - /// #![feature(array_chunks)] - /// let slice = ['l', 'o', 'r', 'e', 'm']; - /// let mut iter = slice.array_chunks(); - /// assert_eq!(iter.next().unwrap(), &['l', 'o']); - /// assert_eq!(iter.next().unwrap(), &['r', 'e']); - /// assert!(iter.next().is_none()); - /// assert_eq!(iter.remainder(), &['m']); - /// ``` - /// - /// [`chunks_exact`]: slice::chunks_exact - #[unstable(feature = "array_chunks", issue = "74985")] - #[rustc_const_unstable(feature = "const_slice_make_iter", issue = "137737")] - #[inline] - #[track_caller] - pub const fn array_chunks(&self) -> ArrayChunks<'_, T, N> { - assert!(N != 0, "chunk size must be non-zero"); - ArrayChunks::new(self) - } - /// Splits the slice into a slice of `N`-element arrays, /// assuming that there's no remainder. /// @@ -1650,44 +1612,6 @@ impl [T] { (remainder, array_slice) } - /// Returns an iterator over `N` elements of the slice at a time, starting at the - /// beginning of the slice. - /// - /// The chunks are mutable array references and do not overlap. If `N` does not divide - /// the length of the slice, then the last up to `N-1` elements will be omitted and - /// can be retrieved from the `into_remainder` function of the iterator. - /// - /// This method is the const generic equivalent of [`chunks_exact_mut`]. - /// - /// # Panics - /// - /// Panics if `N` is zero. This check will most probably get changed to a compile time - /// error before this method gets stabilized. 
- /// - /// # Examples - /// - /// ``` - /// #![feature(array_chunks)] - /// let v = &mut [0, 0, 0, 0, 0]; - /// let mut count = 1; - /// - /// for chunk in v.array_chunks_mut() { - /// *chunk = [count; 2]; - /// count += 1; - /// } - /// assert_eq!(v, &[1, 1, 2, 2, 0]); - /// ``` - /// - /// [`chunks_exact_mut`]: slice::chunks_exact_mut - #[unstable(feature = "array_chunks", issue = "74985")] - #[rustc_const_unstable(feature = "const_slice_make_iter", issue = "137737")] - #[inline] - #[track_caller] - pub const fn array_chunks_mut(&mut self) -> ArrayChunksMut<'_, T, N> { - assert!(N != 0, "chunk size must be non-zero"); - ArrayChunksMut::new(self) - } - /// Returns an iterator over overlapping windows of `N` elements of a slice, /// starting at the beginning of the slice. /// diff --git a/library/core/src/slice/sort/select.rs b/library/core/src/slice/sort/select.rs index 82194db7fd86b..fc31013caf88c 100644 --- a/library/core/src/slice/sort/select.rs +++ b/library/core/src/slice/sort/select.rs @@ -101,8 +101,7 @@ fn partition_at_index_loop<'a, T, F>( // slice. Partition the slice into elements equal to and elements greater than the pivot. // This case is usually hit when the slice contains many duplicate elements. if let Some(p) = ancestor_pivot { - // SAFETY: choose_pivot promises to return a valid pivot position. - let pivot = unsafe { v.get_unchecked(pivot_pos) }; + let pivot = &v[pivot_pos]; if !is_less(p, pivot) { let num_lt = partition(v, pivot_pos, &mut |a, b| !is_less(b, a)); diff --git a/library/core/src/slice/sort/shared/pivot.rs b/library/core/src/slice/sort/shared/pivot.rs index 3aace484b6a89..9eb60f854ce21 100644 --- a/library/core/src/slice/sort/shared/pivot.rs +++ b/library/core/src/slice/sort/shared/pivot.rs @@ -1,6 +1,6 @@ //! This module contains the logic for pivot selection. -use crate::intrinsics; +use crate::{hint, intrinsics}; // Recursively select a pseudomedian if above this threshold. const PSEUDO_MEDIAN_REC_THRESHOLD: usize = 64; @@ -9,6 +9,7 @@ const PSEUDO_MEDIAN_REC_THRESHOLD: usize = 64; /// /// This chooses a pivot by sampling an adaptive amount of points, approximating /// the quality of a median of sqrt(n) elements. +#[inline] pub fn choose_pivot bool>(v: &[T], is_less: &mut F) -> usize { // We use unsafe code and raw pointers here because we're dealing with // heavy recursion. Passing safe slices around would involve a lot of @@ -22,7 +23,7 @@ pub fn choose_pivot bool>(v: &[T], is_less: &mut F) -> us // SAFETY: a, b, c point to initialized regions of len_div_8 elements, // satisfying median3 and median3_rec's preconditions as v_base points // to an initialized region of n = len elements. - unsafe { + let index = unsafe { let v_base = v.as_ptr(); let len_div_8 = len / 8; @@ -35,6 +36,11 @@ pub fn choose_pivot bool>(v: &[T], is_less: &mut F) -> us } else { median3_rec(a, b, c, len_div_8, is_less).offset_from_unsigned(v_base) } + }; + // SAFETY: preconditions must have been met for offset_from_unsigned() + unsafe { + hint::assert_unchecked(index < v.len()); + index } } diff --git a/library/core/src/slice/sort/stable/quicksort.rs b/library/core/src/slice/sort/stable/quicksort.rs index 3c9688790c40b..0439ba870bd2b 100644 --- a/library/core/src/slice/sort/stable/quicksort.rs +++ b/library/core/src/slice/sort/stable/quicksort.rs @@ -37,10 +37,6 @@ pub fn quicksort bool>( limit -= 1; let pivot_pos = choose_pivot(v, is_less); - // SAFETY: choose_pivot promises to return a valid pivot index. 
- unsafe { - intrinsics::assume(pivot_pos < v.len()); - } // SAFETY: We only access the temporary copy for Freeze types, otherwise // self-modifications via `is_less` would not be observed and this would diff --git a/library/core/src/slice/sort/unstable/quicksort.rs b/library/core/src/slice/sort/unstable/quicksort.rs index 98efee242ebae..bdf56a8080305 100644 --- a/library/core/src/slice/sort/unstable/quicksort.rs +++ b/library/core/src/slice/sort/unstable/quicksort.rs @@ -48,8 +48,7 @@ pub(crate) fn quicksort<'a, T, F>( // slice. Partition the slice into elements equal to and elements greater than the pivot. // This case is usually hit when the slice contains many duplicate elements. if let Some(p) = ancestor_pivot { - // SAFETY: We assume choose_pivot yields an in-bounds position. - if !is_less(p, unsafe { v.get_unchecked(pivot_pos) }) { + if !is_less(p, &v[pivot_pos]) { let num_lt = partition(v, pivot_pos, &mut |a, b| !is_less(b, a)); // Continue sorting elements greater than the pivot. We know that `num_lt` contains diff --git a/library/core/src/str/iter.rs b/library/core/src/str/iter.rs index bcf886484add4..d2985d8a18669 100644 --- a/library/core/src/str/iter.rs +++ b/library/core/src/str/iter.rs @@ -52,7 +52,7 @@ impl<'a> Iterator for Chars<'a> { const CHUNK_SIZE: usize = 32; if remainder >= CHUNK_SIZE { - let mut chunks = self.iter.as_slice().array_chunks::(); + let mut chunks = self.iter.as_slice().as_chunks::().0.iter(); let mut bytes_skipped: usize = 0; while remainder > CHUNK_SIZE diff --git a/library/core/src/str/mod.rs b/library/core/src/str/mod.rs index 029abf1753913..c40af4de7e03d 100644 --- a/library/core/src/str/mod.rs +++ b/library/core/src/str/mod.rs @@ -407,17 +407,22 @@ impl str { /// ``` #[unstable(feature = "round_char_boundary", issue = "93743")] #[inline] - pub fn floor_char_boundary(&self, index: usize) -> usize { + pub const fn floor_char_boundary(&self, index: usize) -> usize { if index >= self.len() { self.len() } else { - let lower_bound = index.saturating_sub(3); - let new_index = self.as_bytes()[lower_bound..=index] - .iter() - .rposition(|b| b.is_utf8_char_boundary()); - - // SAFETY: we know that the character boundary will be within four bytes - unsafe { lower_bound + new_index.unwrap_unchecked() } + let mut i = index; + while i > 0 { + if self.as_bytes()[i].is_utf8_char_boundary() { + break; + } + i -= 1; + } + + // The character boundary will be within four bytes of the index + debug_assert!(i >= index.saturating_sub(3)); + + i } } @@ -445,15 +450,22 @@ impl str { /// ``` #[unstable(feature = "round_char_boundary", issue = "93743")] #[inline] - pub fn ceil_char_boundary(&self, index: usize) -> usize { + pub const fn ceil_char_boundary(&self, index: usize) -> usize { if index >= self.len() { self.len() } else { - let upper_bound = Ord::min(index + 4, self.len()); - self.as_bytes()[index..upper_bound] - .iter() - .position(|b| b.is_utf8_char_boundary()) - .map_or(upper_bound, |pos| pos + index) + let mut i = index; + while i < self.len() { + if self.as_bytes()[i].is_utf8_char_boundary() { + break; + } + i += 1; + } + + // The character boundary will be within four bytes of the index + debug_assert!(i <= index + 3); + + i } } diff --git a/library/core/src/str/pattern.rs b/library/core/src/str/pattern.rs index d7b63f9f4a660..104dc8369a0ac 100644 --- a/library/core/src/str/pattern.rs +++ b/library/core/src/str/pattern.rs @@ -1001,7 +1001,10 @@ impl<'b> Pattern for &'b str { return haystack.as_bytes().contains(&self.as_bytes()[0]); } - #[cfg(all(target_arch = 
"x86_64", target_feature = "sse2"))] + #[cfg(any( + all(target_arch = "x86_64", target_feature = "sse2"), + all(target_arch = "loongarch64", target_feature = "lsx") + ))] if self.len() <= 32 { if let Some(result) = simd_contains(self, haystack) { return result; @@ -1775,11 +1778,18 @@ impl TwoWayStrategy for RejectAndMatch { /// If we ever ship std with for x86-64-v3 or adapt this for other platforms then wider vectors /// should be evaluated. /// +/// Similarly, on LoongArch the 128-bit LSX vector extension is the baseline, +/// so we also use `u8x16` there. Wider vector widths may be considered +/// for future LoongArch extensions (e.g., LASX). +/// /// For haystacks smaller than vector-size + needle length it falls back to /// a naive O(n*m) search so this implementation should not be called on larger needles. /// /// [0]: http://0x80.pl/articles/simd-strfind.html#sse-avx2 -#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))] +#[cfg(any( + all(target_arch = "x86_64", target_feature = "sse2"), + all(target_arch = "loongarch64", target_feature = "lsx") +))] #[inline] fn simd_contains(needle: &str, haystack: &str) -> Option { let needle = needle.as_bytes(); @@ -1911,7 +1921,10 @@ fn simd_contains(needle: &str, haystack: &str) -> Option { /// # Safety /// /// Both slices must have the same length. -#[cfg(all(target_arch = "x86_64", any(kani, target_feature = "sse2")))] // only called on x86 +#[cfg(any( + all(target_arch = "x86_64", any(kani, target_feature = "sse2")), + all(target_arch = "loongarch64", target_feature = "lsx") +))] #[inline] #[requires(x.len() == y.len())] unsafe fn small_slice_eq(x: &[u8], y: &[u8]) -> bool { diff --git a/library/core/src/str/traits.rs b/library/core/src/str/traits.rs index d0f2b9226bf6d..1597d1c1fa868 100644 --- a/library/core/src/str/traits.rs +++ b/library/core/src/str/traits.rs @@ -825,6 +825,8 @@ unsafe impl const SliceIndex for ops::RangeToInclusive { /// assert!(Point::from_str("(1 2)").is_err()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] +#[const_trait] +#[rustc_const_unstable(feature = "const_try", issue = "74935")] pub trait FromStr: Sized { /// The associated error which can be returned from parsing. #[stable(feature = "rust1", since = "1.0.0")] diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs index 04c8d1473b048..70c02ead35848 100644 --- a/library/core/src/sync/atomic.rs +++ b/library/core/src/sync/atomic.rs @@ -563,8 +563,8 @@ impl AtomicBool { /// `align_of::() == 1`). /// * `ptr` must be [valid] for both reads and writes for the whole lifetime `'a`. /// * You must adhere to the [Memory model for atomic accesses]. In particular, it is not - /// allowed to mix atomic and non-atomic accesses, or atomic accesses of different sizes, - /// without synchronization. + /// allowed to mix conflicting atomic and non-atomic accesses, or atomic accesses of different + /// sizes, without synchronization. /// /// [valid]: crate::ptr#safety /// [Memory model for atomic accesses]: self#memory-model-for-atomic-accesses @@ -1245,8 +1245,8 @@ impl AtomicBool { /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the /// atomic types work with interior mutability. All modifications of an atomic change the value /// through a shared reference, and can do so safely as long as they use atomic operations. Any - /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the same - /// restriction: operations on it must be atomic. 
+ /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the + /// requirements of the [memory model]. /// /// # Examples /// @@ -1264,6 +1264,8 @@ impl AtomicBool { /// } /// # } /// ``` + /// + /// [memory model]: self#memory-model-for-atomic-accesses #[inline] #[stable(feature = "atomic_as_ptr", since = "1.70.0")] #[rustc_const_stable(feature = "atomic_as_ptr", since = "1.70.0")] @@ -1519,8 +1521,8 @@ impl AtomicPtr { /// can be bigger than `align_of::<*mut T>()`). /// * `ptr` must be [valid] for both reads and writes for the whole lifetime `'a`. /// * You must adhere to the [Memory model for atomic accesses]. In particular, it is not - /// allowed to mix atomic and non-atomic accesses, or atomic accesses of different sizes, - /// without synchronization. + /// allowed to mix conflicting atomic and non-atomic accesses, or atomic accesses of different + /// sizes, without synchronization. /// /// [valid]: crate::ptr#safety /// [Memory model for atomic accesses]: self#memory-model-for-atomic-accesses @@ -2487,8 +2489,8 @@ impl AtomicPtr { /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the /// atomic types work with interior mutability. All modifications of an atomic change the value /// through a shared reference, and can do so safely as long as they use atomic operations. Any - /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the same - /// restriction: operations on it must be atomic. + /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the + /// requirements of the [memory model]. /// /// # Examples /// @@ -2507,6 +2509,8 @@ impl AtomicPtr { /// my_atomic_op(atomic.as_ptr()); /// } /// ``` + /// + /// [memory model]: self#memory-model-for-atomic-accesses #[inline] #[stable(feature = "atomic_as_ptr", since = "1.70.0")] #[rustc_const_stable(feature = "atomic_as_ptr", since = "1.70.0")] @@ -2518,7 +2522,8 @@ impl AtomicPtr { #[cfg(target_has_atomic_load_store = "8")] #[stable(feature = "atomic_bool_from", since = "1.24.0")] -impl From for AtomicBool { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl const From for AtomicBool { /// Converts a `bool` into an `AtomicBool`. /// /// # Examples @@ -2615,7 +2620,8 @@ macro_rules! atomic_int { } #[$stable_from] - impl From<$int_type> for $atomic_type { + #[rustc_const_unstable(feature = "const_try", issue = "74935")] + impl const From<$int_type> for $atomic_type { #[doc = concat!("Converts an `", stringify!($int_type), "` into an `", stringify!($atomic_type), "`.")] #[inline] fn from(v: $int_type) -> Self { Self::new(v) } @@ -2696,8 +2702,8 @@ macro_rules! atomic_int { }] /// * `ptr` must be [valid] for both reads and writes for the whole lifetime `'a`. /// * You must adhere to the [Memory model for atomic accesses]. In particular, it is not - /// allowed to mix atomic and non-atomic accesses, or atomic accesses of different sizes, - /// without synchronization. + /// allowed to mix conflicting atomic and non-atomic accesses, or atomic accesses of different + /// sizes, without synchronization. /// /// [valid]: crate::ptr#safety /// [Memory model for atomic accesses]: self#memory-model-for-atomic-accesses @@ -3617,8 +3623,8 @@ macro_rules! atomic_int { /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the /// atomic types work with interior mutability. 
All modifications of an atomic change the value /// through a shared reference, and can do so safely as long as they use atomic operations. Any - /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the same - /// restriction: operations on it must be atomic. + /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the + /// requirements of the [memory model]. /// /// # Examples /// @@ -3638,6 +3644,8 @@ macro_rules! atomic_int { /// } /// # } /// ``` + /// + /// [memory model]: self#memory-model-for-atomic-accesses #[inline] #[stable(feature = "atomic_as_ptr", since = "1.70.0")] #[rustc_const_stable(feature = "atomic_as_ptr", since = "1.70.0")] diff --git a/library/coretests/tests/lib.rs b/library/coretests/tests/lib.rs index 4cfac9ecc2ab7..029a7b00ad36e 100644 --- a/library/coretests/tests/lib.rs +++ b/library/coretests/tests/lib.rs @@ -2,7 +2,6 @@ #![cfg_attr(target_has_atomic = "128", feature(integer_atomics))] #![cfg_attr(test, feature(cfg_select))] #![feature(alloc_layout_extra)] -#![feature(array_chunks)] #![feature(array_ptr_get)] #![feature(array_try_from_fn)] #![feature(array_windows)] @@ -30,6 +29,7 @@ #![feature(core_private_diy_float)] #![feature(cstr_display)] #![feature(dec2flt)] +#![feature(drop_guard)] #![feature(duration_constants)] #![feature(duration_constructors)] #![feature(duration_constructors_lite)] @@ -76,6 +76,7 @@ #![feature(min_specialization)] #![feature(never_type)] #![feature(next_index)] +#![feature(non_exhaustive_omitted_patterns_lint)] #![feature(numfmt)] #![feature(pattern)] #![feature(pointer_is_aligned_to)] diff --git a/library/coretests/tests/macros.rs b/library/coretests/tests/macros.rs index 1c6aa90dfbca2..50b5eb63e43a7 100644 --- a/library/coretests/tests/macros.rs +++ b/library/coretests/tests/macros.rs @@ -213,3 +213,9 @@ fn _expression() { } ); } + +#[deny(non_exhaustive_omitted_patterns)] +fn _matches_does_not_trigger_non_exhaustive_omitted_patterns_lint(o: core::sync::atomic::Ordering) { + // Ordering is a #[non_exhaustive] enum from a separate crate + let _m = matches!(o, core::sync::atomic::Ordering::Relaxed); +} diff --git a/library/coretests/tests/mem.rs b/library/coretests/tests/mem.rs index 9c15be4a8c4bd..e896c61ef4881 100644 --- a/library/coretests/tests/mem.rs +++ b/library/coretests/tests/mem.rs @@ -1,5 +1,6 @@ use core::mem::*; use core::{array, ptr}; +use std::cell::Cell; #[cfg(panic = "unwind")] use std::rc::Rc; @@ -795,3 +796,48 @@ fn const_maybe_uninit_zeroed() { assert_eq!(unsafe { (*UNINIT.0.cast::<[[u8; SIZE]; 1]>())[0] }, [0u8; SIZE]); } + +#[test] +fn drop_guards_only_dropped_by_closure_when_run() { + let value_drops = Cell::new(0); + let value = DropGuard::new((), |()| value_drops.set(1 + value_drops.get())); + let closure_drops = Cell::new(0); + let guard = DropGuard::new(value, |_| closure_drops.set(1 + closure_drops.get())); + assert_eq!(value_drops.get(), 0); + assert_eq!(closure_drops.get(), 0); + drop(guard); + assert_eq!(value_drops.get(), 1); + assert_eq!(closure_drops.get(), 1); +} + +#[test] +fn drop_guard_into_inner() { + let dropped = Cell::new(false); + let value = DropGuard::new(42, |_| dropped.set(true)); + let guard = DropGuard::new(value, |_| dropped.set(true)); + let inner = DropGuard::into_inner(guard); + assert_eq!(dropped.get(), false); + assert_eq!(*inner, 42); +} + +#[test] +#[cfg(panic = "unwind")] +fn drop_guard_always_drops_value_if_closure_drop_unwinds() { + // Create a value with a destructor, which we will validate ran 
successfully. + let mut value_was_dropped = false; + let value_with_tracked_destruction = DropGuard::new((), |_| value_was_dropped = true); + + // Create a closure that will begin unwinding when dropped. + let drop_bomb = DropGuard::new((), |_| panic!()); + let closure_that_panics_on_drop = move |_| { + let _drop_bomb = drop_bomb; + }; + + // This will run the closure, which will panic when dropped. This should + // run the destructor of the value we passed, which we validate. + let _ = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { + let guard = DropGuard::new(value_with_tracked_destruction, closure_that_panics_on_drop); + DropGuard::into_inner(guard); + })); + assert!(value_was_dropped); +} diff --git a/library/coretests/tests/num/dec2flt/float.rs b/library/coretests/tests/num/dec2flt/float.rs index 193d5887749ab..8bf4094ced72f 100644 --- a/library/coretests/tests/num/dec2flt/float.rs +++ b/library/coretests/tests/num/dec2flt/float.rs @@ -1,13 +1,14 @@ use core::num::dec2flt::float::RawFloat; +use crate::num::{ldexp_f32, ldexp_f64}; + // FIXME(f16_f128): enable on all targets once possible. #[test] #[cfg(target_has_reliable_f16)] fn test_f16_integer_decode() { assert_eq!(3.14159265359f16.integer_decode(), (1608, -9, 1)); assert_eq!((-8573.5918555f16).integer_decode(), (1072, 3, -1)); - #[cfg(not(miri))] // miri doesn't have powf16 - assert_eq!(2f16.powf(14.0).integer_decode(), (1 << 10, 4, 1)); + assert_eq!(crate::num::ldexp_f16(1.0, 14).integer_decode(), (1 << 10, 4, 1)); assert_eq!(0f16.integer_decode(), (0, -25, 1)); assert_eq!((-0f16).integer_decode(), (0, -25, -1)); assert_eq!(f16::INFINITY.integer_decode(), (1 << 10, 6, 1)); @@ -23,8 +24,7 @@ fn test_f16_integer_decode() { fn test_f32_integer_decode() { assert_eq!(3.14159265359f32.integer_decode(), (13176795, -22, 1)); assert_eq!((-8573.5918555f32).integer_decode(), (8779358, -10, -1)); - // Set 2^100 directly instead of using powf, because it doesn't guarantee precision - assert_eq!(1.2676506e30_f32.integer_decode(), (8388608, 77, 1)); + assert_eq!(ldexp_f32(1.0, 100).integer_decode(), (8388608, 77, 1)); assert_eq!(0f32.integer_decode(), (0, -150, 1)); assert_eq!((-0f32).integer_decode(), (0, -150, -1)); assert_eq!(f32::INFINITY.integer_decode(), (8388608, 105, 1)); @@ -40,8 +40,7 @@ fn test_f32_integer_decode() { fn test_f64_integer_decode() { assert_eq!(3.14159265359f64.integer_decode(), (7074237752028906, -51, 1)); assert_eq!((-8573.5918555f64).integer_decode(), (4713381968463931, -39, -1)); - // Set 2^100 directly instead of using powf, because it doesn't guarantee precision - assert_eq!(1.2676506002282294e30_f64.integer_decode(), (4503599627370496, 48, 1)); + assert_eq!(ldexp_f64(1.0, 100).integer_decode(), (4503599627370496, 48, 1)); assert_eq!(0f64.integer_decode(), (0, -1075, 1)); assert_eq!((-0f64).integer_decode(), (0, -1075, -1)); assert_eq!(f64::INFINITY.integer_decode(), (4503599627370496, 972, 1)); diff --git a/library/coretests/tests/num/flt2dec/estimator.rs b/library/coretests/tests/num/flt2dec/estimator.rs index da203b5f3620e..f53282611f686 100644 --- a/library/coretests/tests/num/flt2dec/estimator.rs +++ b/library/coretests/tests/num/flt2dec/estimator.rs @@ -1,5 +1,7 @@ use core::num::flt2dec::estimator::*; +use crate::num::ldexp_f64; + #[test] fn test_estimate_scaling_factor() { macro_rules! 
assert_almost_eq { @@ -56,7 +58,7 @@ fn test_estimate_scaling_factor() { let step = if cfg!(miri) { 37 } else { 1 }; for i in (-1074..972).step_by(step) { - let expected = super::ldexp_f64(1.0, i).log10().ceil(); + let expected = ldexp_f64(1.0, i).log10().ceil(); assert_almost_eq!(estimate_scaling_factor(1, i as i16), expected as i16); } } diff --git a/library/coretests/tests/num/flt2dec/mod.rs b/library/coretests/tests/num/flt2dec/mod.rs index ce36db33d05f3..4e73bd1f12ee9 100644 --- a/library/coretests/tests/num/flt2dec/mod.rs +++ b/library/coretests/tests/num/flt2dec/mod.rs @@ -6,6 +6,8 @@ use core::num::fmt::{Formatted, Part}; use std::mem::MaybeUninit; use std::{fmt, str}; +use crate::num::{ldexp_f32, ldexp_f64}; + mod estimator; mod strategy { mod dragon; @@ -75,24 +77,6 @@ macro_rules! try_fixed { }) } -#[cfg(target_has_reliable_f16)] -fn ldexp_f16(a: f16, b: i32) -> f16 { - ldexp_f64(a as f64, b) as f16 -} - -fn ldexp_f32(a: f32, b: i32) -> f32 { - ldexp_f64(a as f64, b) as f32 -} - -fn ldexp_f64(a: f64, b: i32) -> f64 { - unsafe extern "C" { - fn ldexp(x: f64, n: i32) -> f64; - } - // SAFETY: assuming a correct `ldexp` has been supplied, the given arguments cannot possibly - // cause undefined behavior - unsafe { ldexp(a, b) } -} - fn check_exact(mut f: F, v: T, vstr: &str, expected: &[u8], expectedk: i16) where T: DecodableFloat, @@ -268,7 +252,7 @@ where // 10^2 * 0.31984375 // 10^2 * 0.32 // 10^2 * 0.3203125 - check_shortest!(f(ldexp_f16(1.0, 5)) => b"32", 2); + check_shortest!(f(crate::num::ldexp_f16(1.0, 5)) => b"32", 2); // 10^5 * 0.65472 // 10^5 * 0.65504 @@ -283,7 +267,7 @@ where // 10^-9 * 0 // 10^-9 * 0.59604644775390625 // 10^-8 * 0.11920928955078125 - let minf16 = ldexp_f16(1.0, -24); + let minf16 = crate::num::ldexp_f16(1.0, -24); check_shortest!(f(minf16) => b"6", -7); } @@ -292,7 +276,7 @@ pub fn f16_exact_sanity_test(mut f: F) where F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit], i16) -> (&'a [u8], i16), { - let minf16 = ldexp_f16(1.0, -24); + let minf16 = crate::num::ldexp_f16(1.0, -24); check_exact!(f(0.1f16) => b"999755859375 ", -1); check_exact!(f(0.5f16) => b"5 ", 0); @@ -642,7 +626,7 @@ where assert_eq!(to_string(f, f16::MAX, Minus, 1), "65500.0"); assert_eq!(to_string(f, f16::MAX, Minus, 8), "65500.00000000"); - let minf16 = ldexp_f16(1.0, -24); + let minf16 = crate::num::ldexp_f16(1.0, -24); assert_eq!(to_string(f, minf16, Minus, 0), "0.00000006"); assert_eq!(to_string(f, minf16, Minus, 8), "0.00000006"); assert_eq!(to_string(f, minf16, Minus, 9), "0.000000060"); @@ -766,7 +750,7 @@ where assert_eq!(to_string(f, f16::MAX, Minus, (-4, 4), false), "6.55e4"); assert_eq!(to_string(f, f16::MAX, Minus, (-5, 5), false), "65500"); - let minf16 = ldexp_f16(1.0, -24); + let minf16 = crate::num::ldexp_f16(1.0, -24); assert_eq!(to_string(f, minf16, Minus, (-2, 2), false), "6e-8"); assert_eq!(to_string(f, minf16, Minus, (-7, 7), false), "6e-8"); assert_eq!(to_string(f, minf16, Minus, (-8, 8), false), "0.00000006"); @@ -922,7 +906,7 @@ where assert_eq!(to_string(f, f16::MAX, Minus, 6, false), "6.55040e4"); assert_eq!(to_string(f, f16::MAX, Minus, 16, false), "6.550400000000000e4"); - let minf16 = ldexp_f16(1.0, -24); + let minf16 = crate::num::ldexp_f16(1.0, -24); assert_eq!(to_string(f, minf16, Minus, 1, false), "6e-8"); assert_eq!(to_string(f, minf16, Minus, 2, false), "6.0e-8"); assert_eq!(to_string(f, minf16, Minus, 4, false), "5.960e-8"); @@ -1229,7 +1213,7 @@ where #[cfg(target_has_reliable_f16)] { - let minf16 = ldexp_f16(1.0, -24); + let minf16 = 
crate::num::ldexp_f16(1.0, -24); assert_eq!(to_string(f, minf16, Minus, 0), "0"); assert_eq!(to_string(f, minf16, Minus, 1), "0.0"); assert_eq!(to_string(f, minf16, Minus, 2), "0.00"); diff --git a/library/coretests/tests/num/mod.rs b/library/coretests/tests/num/mod.rs index f340926292c88..54e54f734f647 100644 --- a/library/coretests/tests/num/mod.rs +++ b/library/coretests/tests/num/mod.rs @@ -54,6 +54,27 @@ macro_rules! assume_usize_width { } } +/// Return `a * 2^b`. +#[cfg(target_has_reliable_f16)] +fn ldexp_f16(a: f16, b: i32) -> f16 { + ldexp_f64(a as f64, b) as f16 +} + +/// Return `a * 2^b`. +fn ldexp_f32(a: f32, b: i32) -> f32 { + ldexp_f64(a as f64, b) as f32 +} + +/// Return `a * 2^b`. +fn ldexp_f64(a: f64, b: i32) -> f64 { + unsafe extern "C" { + fn ldexp(x: f64, n: i32) -> f64; + } + // SAFETY: assuming a correct `ldexp` has been supplied, the given arguments cannot possibly + // cause undefined behavior + unsafe { ldexp(a, b) } +} + /// Helper function for testing numeric operations pub fn test_num(ten: T, two: T) where diff --git a/library/coretests/tests/slice.rs b/library/coretests/tests/slice.rs index d17e681480c70..992f24cb18f20 100644 --- a/library/coretests/tests/slice.rs +++ b/library/coretests/tests/slice.rs @@ -611,190 +611,6 @@ fn test_chunks_exact_mut_zip() { assert_eq!(v1, [13, 14, 19, 20, 4]); } -#[test] -fn test_array_chunks_infer() { - let v: &[i32] = &[0, 1, 2, 3, 4, -4]; - let c = v.array_chunks(); - for &[a, b, c] in c { - assert_eq!(a + b + c, 3); - } - - let v2: &[i32] = &[0, 1, 2, 3, 4, 5, 6]; - let total = v2.array_chunks().map(|&[a, b]| a * b).sum::(); - assert_eq!(total, 2 * 3 + 4 * 5); -} - -#[test] -fn test_array_chunks_count() { - let v: &[i32] = &[0, 1, 2, 3, 4, 5]; - let c = v.array_chunks::<3>(); - assert_eq!(c.count(), 2); - - let v2: &[i32] = &[0, 1, 2, 3, 4]; - let c2 = v2.array_chunks::<2>(); - assert_eq!(c2.count(), 2); - - let v3: &[i32] = &[]; - let c3 = v3.array_chunks::<2>(); - assert_eq!(c3.count(), 0); -} - -#[test] -fn test_array_chunks_nth() { - let v: &[i32] = &[0, 1, 2, 3, 4, 5]; - let mut c = v.array_chunks::<2>(); - assert_eq!(c.nth(1).unwrap(), &[2, 3]); - assert_eq!(c.next().unwrap(), &[4, 5]); - - let v2: &[i32] = &[0, 1, 2, 3, 4, 5, 6]; - let mut c2 = v2.array_chunks::<3>(); - assert_eq!(c2.nth(1).unwrap(), &[3, 4, 5]); - assert_eq!(c2.next(), None); -} - -#[test] -fn test_array_chunks_nth_back() { - let v: &[i32] = &[0, 1, 2, 3, 4, 5]; - let mut c = v.array_chunks::<2>(); - assert_eq!(c.nth_back(1).unwrap(), &[2, 3]); - assert_eq!(c.next().unwrap(), &[0, 1]); - assert_eq!(c.next(), None); - - let v2: &[i32] = &[0, 1, 2, 3, 4]; - let mut c2 = v2.array_chunks::<3>(); - assert_eq!(c2.nth_back(0).unwrap(), &[0, 1, 2]); - assert_eq!(c2.next(), None); - assert_eq!(c2.next_back(), None); - - let v3: &[i32] = &[0, 1, 2, 3, 4]; - let mut c3 = v3.array_chunks::<10>(); - assert_eq!(c3.nth_back(0), None); -} - -#[test] -fn test_array_chunks_last() { - let v: &[i32] = &[0, 1, 2, 3, 4, 5]; - let c = v.array_chunks::<2>(); - assert_eq!(c.last().unwrap(), &[4, 5]); - - let v2: &[i32] = &[0, 1, 2, 3, 4]; - let c2 = v2.array_chunks::<2>(); - assert_eq!(c2.last().unwrap(), &[2, 3]); -} - -#[test] -fn test_array_chunks_remainder() { - let v: &[i32] = &[0, 1, 2, 3, 4]; - let c = v.array_chunks::<2>(); - assert_eq!(c.remainder(), &[4]); -} - -#[test] -fn test_array_chunks_zip() { - let v1: &[i32] = &[0, 1, 2, 3, 4]; - let v2: &[i32] = &[6, 7, 8, 9, 10]; - - let res = v1 - .array_chunks::<2>() - .zip(v2.array_chunks::<2>()) - .map(|(a, b)| 
a.iter().sum::() + b.iter().sum::()) - .collect::>(); - assert_eq!(res, vec![14, 22]); -} - -#[test] -fn test_array_chunks_mut_infer() { - let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5, 6]; - for a in v.array_chunks_mut() { - let sum = a.iter().sum::(); - *a = [sum; 3]; - } - assert_eq!(v, &[3, 3, 3, 12, 12, 12, 6]); - - let v2: &mut [i32] = &mut [0, 1, 2, 3, 4, 5, 6]; - v2.array_chunks_mut().for_each(|[a, b]| core::mem::swap(a, b)); - assert_eq!(v2, &[1, 0, 3, 2, 5, 4, 6]); -} - -#[test] -fn test_array_chunks_mut_count() { - let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5]; - let c = v.array_chunks_mut::<3>(); - assert_eq!(c.count(), 2); - - let v2: &mut [i32] = &mut [0, 1, 2, 3, 4]; - let c2 = v2.array_chunks_mut::<2>(); - assert_eq!(c2.count(), 2); - - let v3: &mut [i32] = &mut []; - let c3 = v3.array_chunks_mut::<2>(); - assert_eq!(c3.count(), 0); -} - -#[test] -fn test_array_chunks_mut_nth() { - let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5]; - let mut c = v.array_chunks_mut::<2>(); - assert_eq!(c.nth(1).unwrap(), &[2, 3]); - assert_eq!(c.next().unwrap(), &[4, 5]); - - let v2: &mut [i32] = &mut [0, 1, 2, 3, 4, 5, 6]; - let mut c2 = v2.array_chunks_mut::<3>(); - assert_eq!(c2.nth(1).unwrap(), &[3, 4, 5]); - assert_eq!(c2.next(), None); -} - -#[test] -fn test_array_chunks_mut_nth_back() { - let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5]; - let mut c = v.array_chunks_mut::<2>(); - assert_eq!(c.nth_back(1).unwrap(), &[2, 3]); - assert_eq!(c.next().unwrap(), &[0, 1]); - assert_eq!(c.next(), None); - - let v2: &mut [i32] = &mut [0, 1, 2, 3, 4]; - let mut c2 = v2.array_chunks_mut::<3>(); - assert_eq!(c2.nth_back(0).unwrap(), &[0, 1, 2]); - assert_eq!(c2.next(), None); - assert_eq!(c2.next_back(), None); - - let v3: &mut [i32] = &mut [0, 1, 2, 3, 4]; - let mut c3 = v3.array_chunks_mut::<10>(); - assert_eq!(c3.nth_back(0), None); -} - -#[test] -fn test_array_chunks_mut_last() { - let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5]; - let c = v.array_chunks_mut::<2>(); - assert_eq!(c.last().unwrap(), &[4, 5]); - - let v2: &mut [i32] = &mut [0, 1, 2, 3, 4]; - let c2 = v2.array_chunks_mut::<2>(); - assert_eq!(c2.last().unwrap(), &[2, 3]); -} - -#[test] -fn test_array_chunks_mut_remainder() { - let v: &mut [i32] = &mut [0, 1, 2, 3, 4]; - let c = v.array_chunks_mut::<2>(); - assert_eq!(c.into_remainder(), &[4]); -} - -#[test] -fn test_array_chunks_mut_zip() { - let v1: &mut [i32] = &mut [0, 1, 2, 3, 4]; - let v2: &[i32] = &[6, 7, 8, 9, 10]; - - for (a, b) in v1.array_chunks_mut::<2>().zip(v2.array_chunks::<2>()) { - let sum = b.iter().sum::(); - for v in a { - *v += sum; - } - } - assert_eq!(v1, [13, 14, 19, 20, 4]); -} - #[test] fn test_array_windows_infer() { let v: &[i32] = &[0, 1, 0, 1]; diff --git a/library/rustc-std-workspace-alloc/Cargo.toml b/library/rustc-std-workspace-alloc/Cargo.toml index 5a177808d1bd8..a5b51059119c0 100644 --- a/library/rustc-std-workspace-alloc/Cargo.toml +++ b/library/rustc-std-workspace-alloc/Cargo.toml @@ -9,6 +9,9 @@ edition = "2024" [lib] path = "lib.rs" +test = false +bench = false +doc = false [dependencies] alloc = { path = "../alloc" } diff --git a/library/rustc-std-workspace-core/Cargo.toml b/library/rustc-std-workspace-core/Cargo.toml index 1ddc112380f16..d68965c634578 100644 --- a/library/rustc-std-workspace-core/Cargo.toml +++ b/library/rustc-std-workspace-core/Cargo.toml @@ -11,6 +11,9 @@ edition = "2024" [lib] path = "lib.rs" +test = false +bench = false +doc = false [dependencies] core = { path = "../core", public = true } diff --git 
a/library/rustc-std-workspace-std/Cargo.toml b/library/rustc-std-workspace-std/Cargo.toml index f70994e1f8868..6079dc85d906b 100644 --- a/library/rustc-std-workspace-std/Cargo.toml +++ b/library/rustc-std-workspace-std/Cargo.toml @@ -9,6 +9,9 @@ edition = "2024" [lib] path = "lib.rs" +test = false +bench = false +doc = false [dependencies] std = { path = "../std" } diff --git a/library/std/Cargo.toml b/library/std/Cargo.toml index cde8654e3a0de..6b539279ea2d7 100644 --- a/library/std/Cargo.toml +++ b/library/std/Cargo.toml @@ -23,9 +23,7 @@ unwind = { path = "../unwind" } hashbrown = { version = "0.15", default-features = false, features = [ 'rustc-dep-of-std', ] } -std_detect = { path = "../stdarch/crates/std_detect", public = true, default-features = false, features = [ - 'rustc-dep-of-std', -] } +std_detect = { path = "../std_detect", public = true } safety = { path = "../contracts/safety" } # Dependencies of the `backtrace` crate @@ -66,10 +64,10 @@ rand = { version = "0.9.0", default-features = false, features = ["alloc"] } rand_xorshift = "0.4.0" [target.'cfg(any(all(target_family = "wasm", target_os = "unknown"), target_os = "xous", all(target_vendor = "fortanix", target_env = "sgx")))'.dependencies] -dlmalloc = { version = "0.2.4", features = ['rustc-dep-of-std'] } +dlmalloc = { version = "0.2.10", features = ['rustc-dep-of-std'] } [target.x86_64-fortanix-unknown-sgx.dependencies] -fortanix-sgx-abi = { version = "0.5.0", features = [ +fortanix-sgx-abi = { version = "0.6.1", features = [ 'rustc-dep-of-std', ], public = true } @@ -100,9 +98,7 @@ backtrace-trace-only = [] panic-unwind = ["dep:panic_unwind"] compiler-builtins-c = ["alloc/compiler-builtins-c"] compiler-builtins-mem = ["alloc/compiler-builtins-mem"] -compiler-builtins-no-asm = ["alloc/compiler-builtins-no-asm"] compiler-builtins-no-f16-f128 = ["alloc/compiler-builtins-no-f16-f128"] -compiler-builtins-mangled-names = ["alloc/compiler-builtins-mangled-names"] llvm-libunwind = ["unwind/llvm-libunwind"] system-llvm-libunwind = ["unwind/system-llvm-libunwind"] @@ -119,8 +115,7 @@ optimize_for_size = ["core/optimize_for_size", "alloc/optimize_for_size"] debug_refcell = ["core/debug_refcell"] -# Enable std_detect default features for stdarch/crates/std_detect: -# https://github.com/rust-lang/stdarch/blob/master/crates/std_detect/Cargo.toml +# Enable std_detect features: std_detect_file_io = ["std_detect/std_detect_file_io"] std_detect_dlsym_getauxval = ["std_detect/std_detect_dlsym_getauxval"] diff --git a/library/std/src/env.rs b/library/std/src/env.rs index 6d7d576b32a10..9f17ff7644557 100644 --- a/library/std/src/env.rs +++ b/library/std/src/env.rs @@ -617,7 +617,7 @@ impl Error for JoinPathsError { /// # Unix /// /// - Returns the value of the 'HOME' environment variable if it is set -/// (including to an empty string). +/// (and not an empty string). /// - Otherwise, it tries to determine the home directory by invoking the `getpwuid_r` function /// using the UID of the current user. An empty home directory field returned from the /// `getpwuid_r` function is considered to be a valid value. 
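For the `home_dir` documentation fix above, here is a minimal sketch of the Unix lookup order as now documented; this is an editor-added illustration only, not the actual `std::env::home_dir` implementation, and the `getpwuid_r` fallback is stubbed out:

```
use std::env;
use std::path::PathBuf;

/// Documented Unix behavior: use `HOME` only when it is set to a
/// non-empty value; otherwise fall back to the passwd database.
fn home_dir_sketch() -> Option<PathBuf> {
    match env::var_os("HOME") {
        Some(home) if !home.is_empty() => Some(PathBuf::from(home)),
        _ => home_dir_from_passwd(),
    }
}

/// Placeholder for the `getpwuid_r`-based fallback (not shown here).
fn home_dir_from_passwd() -> Option<PathBuf> {
    None
}

fn main() {
    println!("{:?}", home_dir_sketch());
}
```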
diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs index e5b3c2f34cc48..38eec467e3ea1 100644 --- a/library/std/src/lib.rs +++ b/library/std/src/lib.rs @@ -325,13 +325,13 @@ // // Library features (core): // tidy-alphabetical-start -#![feature(array_chunks)] #![feature(bstr)] #![feature(bstr_internals)] #![feature(char_internals)] #![feature(clone_to_uninit)] #![feature(core_intrinsics)] #![feature(core_io_borrowed_buf)] +#![feature(drop_guard)] #![feature(duration_constants)] #![feature(error_generic_member_access)] #![feature(error_iter)] diff --git a/library/std/src/sync/mod.rs b/library/std/src/sync/mod.rs index e67b4f6f22f5a..6ef3bf25cf67c 100644 --- a/library/std/src/sync/mod.rs +++ b/library/std/src/sync/mod.rs @@ -225,6 +225,8 @@ pub use self::poison::{MappedMutexGuard, MappedRwLockReadGuard, MappedRwLockWrit pub mod mpmc; pub mod mpsc; +#[unstable(feature = "sync_nonpoison", issue = "134645")] +pub mod nonpoison; #[unstable(feature = "sync_poison_mod", issue = "134646")] pub mod poison; diff --git a/library/std/src/sync/nonpoison.rs b/library/std/src/sync/nonpoison.rs new file mode 100644 index 0000000000000..2bbf226dc2cde --- /dev/null +++ b/library/std/src/sync/nonpoison.rs @@ -0,0 +1,37 @@ +//! Non-poisoning synchronous locks. +//! +//! The difference from the locks in the [`poison`] module is that the locks in this module will not +//! become poisoned when a thread panics while holding a guard. +//! +//! [`poison`]: super::poison + +use crate::fmt; + +/// A type alias for the result of a nonblocking locking method. +#[unstable(feature = "sync_nonpoison", issue = "134645")] +pub type TryLockResult = Result; + +/// A lock could not be acquired at this time because the operation would otherwise block. +#[unstable(feature = "sync_nonpoison", issue = "134645")] +pub struct WouldBlock; + +#[unstable(feature = "sync_nonpoison", issue = "134645")] +impl fmt::Debug for WouldBlock { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + "WouldBlock".fmt(f) + } +} + +#[unstable(feature = "sync_nonpoison", issue = "134645")] +impl fmt::Display for WouldBlock { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + "try_lock failed because the operation would block".fmt(f) + } +} + +#[unstable(feature = "mapped_lock_guards", issue = "117108")] +pub use self::mutex::MappedMutexGuard; +#[unstable(feature = "nonpoison_mutex", issue = "134645")] +pub use self::mutex::{Mutex, MutexGuard}; + +mod mutex; diff --git a/library/std/src/sync/nonpoison/mutex.rs b/library/std/src/sync/nonpoison/mutex.rs new file mode 100644 index 0000000000000..b6861c78f001d --- /dev/null +++ b/library/std/src/sync/nonpoison/mutex.rs @@ -0,0 +1,611 @@ +use crate::cell::UnsafeCell; +use crate::fmt; +use crate::marker::PhantomData; +use crate::mem::{self, ManuallyDrop}; +use crate::ops::{Deref, DerefMut}; +use crate::ptr::NonNull; +use crate::sync::nonpoison::{TryLockResult, WouldBlock}; +use crate::sys::sync as sys; + +/// A mutual exclusion primitive useful for protecting shared data that does not keep track of +/// lock poisoning. +/// +/// For more information about mutexes, check out the documentation for the poisoning variant of +/// this lock at [`poison::Mutex`]. +/// +/// [`poison::Mutex`]: crate::sync::poison::Mutex +/// +/// # Examples +/// +/// Note that this `Mutex` does **not** propagate threads that panic while holding the lock via +/// poisoning. If you need this functionality, see [`poison::Mutex`]. 
+/// +/// ``` +/// #![feature(nonpoison_mutex)] +/// +/// use std::thread; +/// use std::sync::{Arc, nonpoison::Mutex}; +/// +/// let mutex = Arc::new(Mutex::new(0u32)); +/// let mut handles = Vec::new(); +/// +/// for n in 0..10 { +/// let m = Arc::clone(&mutex); +/// let handle = thread::spawn(move || { +/// let mut guard = m.lock(); +/// *guard += 1; +/// panic!("panic from thread {n} {guard}") +/// }); +/// handles.push(handle); +/// } +/// +/// for h in handles { +/// let _ = h.join(); +/// } +/// +/// println!("Finished, locked {} times", mutex.lock()); +/// ``` +#[unstable(feature = "nonpoison_mutex", issue = "134645")] +#[cfg_attr(not(test), rustc_diagnostic_item = "NonPoisonMutex")] +pub struct Mutex<T: ?Sized> { + inner: sys::Mutex, + data: UnsafeCell<T>, +} + +/// `T` must be `Send` for a [`Mutex`] to be `Send` because it is possible to acquire +/// the owned `T` from the `Mutex` via [`into_inner`]. +/// +/// [`into_inner`]: Mutex::into_inner +#[unstable(feature = "nonpoison_mutex", issue = "134645")] +unsafe impl<T: ?Sized + Send> Send for Mutex<T> {} + +/// `T` must be `Send` for [`Mutex`] to be `Sync`. +/// This ensures that the protected data can be accessed safely from multiple threads +/// without causing data races or other unsafe behavior. +/// +/// [`Mutex`] provides mutable access to `T` to one thread at a time. However, it's essential +/// for `T` to be `Send` because it's not safe for non-`Send` structures to be accessed in +/// this manner. For instance, consider [`Rc`], a non-atomic reference counted smart pointer, +/// which is not `Send`. With `Rc`, we can have multiple copies pointing to the same heap +/// allocation with a non-atomic reference count. If we were to use `Mutex<Rc<T>>`, it would +/// only protect one instance of `Rc` from shared access, leaving other copies vulnerable +/// to potential data races. +/// +/// Also note that it is not necessary for `T` to be `Sync` as `&T` is only made available +/// to one thread at a time if `T` is not `Sync`. +/// +/// [`Rc`]: crate::rc::Rc +#[unstable(feature = "nonpoison_mutex", issue = "134645")] +unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {} + +/// An RAII implementation of a "scoped lock" of a mutex. When this structure is +/// dropped (falls out of scope), the lock will be unlocked. +/// +/// The data protected by the mutex can be accessed through this guard via its +/// [`Deref`] and [`DerefMut`] implementations. +/// +/// This structure is created by the [`lock`] and [`try_lock`] methods on +/// [`Mutex`]. +/// +/// [`lock`]: Mutex::lock +/// [`try_lock`]: Mutex::try_lock +#[must_use = "if unused the Mutex will immediately unlock"] +#[must_not_suspend = "holding a MutexGuard across suspend \ + points can cause deadlocks, delays, \ + and cause Futures to not implement `Send`"] +#[unstable(feature = "nonpoison_mutex", issue = "134645")] +#[clippy::has_significant_drop] +#[cfg_attr(not(test), rustc_diagnostic_item = "NonPoisonMutexGuard")] +pub struct MutexGuard<'a, T: ?Sized + 'a> { + lock: &'a Mutex<T>, +} + +/// A [`MutexGuard`] is not `Send` to maximize platform portability. +/// +/// On platforms that use POSIX threads (commonly referred to as pthreads) there is a requirement to +/// release mutex locks on the same thread they were acquired. +/// For this reason, [`MutexGuard`] must not implement `Send` to prevent it being dropped from +/// another thread.
+#[unstable(feature = "nonpoison_mutex", issue = "134645")] +impl !Send for MutexGuard<'_, T> {} + +/// `T` must be `Sync` for a [`MutexGuard`] to be `Sync` +/// because it is possible to get a `&T` from `&MutexGuard` (via `Deref`). +#[unstable(feature = "nonpoison_mutex", issue = "134645")] +unsafe impl Sync for MutexGuard<'_, T> {} + +// FIXME(nonpoison_condvar): Use this link instead: [`Condvar`]: crate::sync::nonpoison::Condvar +/// An RAII mutex guard returned by `MutexGuard::map`, which can point to a +/// subfield of the protected data. When this structure is dropped (falls out +/// of scope), the lock will be unlocked. +/// +/// The main difference between `MappedMutexGuard` and [`MutexGuard`] is that the +/// former cannot be used with [`Condvar`], since that could introduce soundness issues if the +/// locked object is modified by another thread while the `Mutex` is unlocked. +/// +/// The data protected by the mutex can be accessed through this guard via its +/// [`Deref`] and [`DerefMut`] implementations. +/// +/// This structure is created by the [`map`] and [`filter_map`] methods on +/// [`MutexGuard`]. +/// +/// [`map`]: MutexGuard::map +/// [`filter_map`]: MutexGuard::filter_map +/// [`Condvar`]: crate::sync::Condvar +#[must_use = "if unused the Mutex will immediately unlock"] +#[must_not_suspend = "holding a MappedMutexGuard across suspend \ + points can cause deadlocks, delays, \ + and cause Futures to not implement `Send`"] +#[unstable(feature = "mapped_lock_guards", issue = "117108")] +// #[unstable(feature = "nonpoison_mutex", issue = "134645")] +#[clippy::has_significant_drop] +pub struct MappedMutexGuard<'a, T: ?Sized + 'a> { + // NB: we use a pointer instead of `&'a mut T` to avoid `noalias` violations, because a + // `MappedMutexGuard` argument doesn't hold uniqueness for its whole scope, only until it drops. + // `NonNull` is covariant over `T`, so we add a `PhantomData<&'a mut T>` field + // below for the correct variance over `T` (invariance). + data: NonNull, + inner: &'a sys::Mutex, + _variance: PhantomData<&'a mut T>, +} + +#[unstable(feature = "mapped_lock_guards", issue = "117108")] +// #[unstable(feature = "nonpoison_mutex", issue = "134645")] +impl !Send for MappedMutexGuard<'_, T> {} +#[unstable(feature = "mapped_lock_guards", issue = "117108")] +// #[unstable(feature = "nonpoison_mutex", issue = "134645")] +unsafe impl Sync for MappedMutexGuard<'_, T> {} + +impl Mutex { + /// Creates a new mutex in an unlocked state ready for use. + /// + /// # Examples + /// + /// ``` + /// #![feature(nonpoison_mutex)] + /// + /// use std::sync::nonpoison::Mutex; + /// + /// let mutex = Mutex::new(0); + /// ``` + #[unstable(feature = "nonpoison_mutex", issue = "134645")] + #[inline] + pub const fn new(t: T) -> Mutex { + Mutex { inner: sys::Mutex::new(), data: UnsafeCell::new(t) } + } + + /// Returns the contained value by cloning it. + /// + /// # Examples + /// + /// ``` + /// #![feature(nonpoison_mutex)] + /// #![feature(lock_value_accessors)] + /// + /// use std::sync::nonpoison::Mutex; + /// + /// let mut mutex = Mutex::new(7); + /// + /// assert_eq!(mutex.get_cloned(), 7); + /// ``` + #[unstable(feature = "lock_value_accessors", issue = "133407")] + // #[unstable(feature = "nonpoison_mutex", issue = "134645")] + pub fn get_cloned(&self) -> T + where + T: Clone, + { + self.lock().clone() + } + + /// Sets the contained value. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(nonpoison_mutex)] + /// #![feature(lock_value_accessors)] + /// + /// use std::sync::nonpoison::Mutex; + /// + /// let mut mutex = Mutex::new(7); + /// + /// assert_eq!(mutex.get_cloned(), 7); + /// mutex.set(11); + /// assert_eq!(mutex.get_cloned(), 11); + /// ``` + #[unstable(feature = "lock_value_accessors", issue = "133407")] + // #[unstable(feature = "nonpoison_mutex", issue = "134645")] + pub fn set(&self, value: T) { + if mem::needs_drop::() { + // If the contained value has a non-trivial destructor, we + // call that destructor after the lock has been released. + drop(self.replace(value)) + } else { + *self.lock() = value; + } + } + + /// Replaces the contained value with `value`, and returns the old contained value. + /// + /// # Examples + /// + /// ``` + /// #![feature(nonpoison_mutex)] + /// #![feature(lock_value_accessors)] + /// + /// use std::sync::nonpoison::Mutex; + /// + /// let mut mutex = Mutex::new(7); + /// + /// assert_eq!(mutex.replace(11), 7); + /// assert_eq!(mutex.get_cloned(), 11); + /// ``` + #[unstable(feature = "lock_value_accessors", issue = "133407")] + // #[unstable(feature = "nonpoison_mutex", issue = "134645")] + pub fn replace(&self, value: T) -> T { + let mut guard = self.lock(); + mem::replace(&mut *guard, value) + } +} + +impl Mutex { + /// Acquires a mutex, blocking the current thread until it is able to do so. + /// + /// This function will block the local thread until it is available to acquire + /// the mutex. Upon returning, the thread is the only thread with the lock + /// held. An RAII guard is returned to allow scoped unlock of the lock. When + /// the guard goes out of scope, the mutex will be unlocked. + /// + /// The exact behavior on locking a mutex in the thread which already holds + /// the lock is left unspecified. However, this function will not return on + /// the second call (it might panic or deadlock, for example). + /// + /// # Panics + /// + /// This function might panic when called if the lock is already held by + /// the current thread. + /// + /// # Examples + /// + /// ``` + /// #![feature(nonpoison_mutex)] + /// + /// use std::sync::{Arc, nonpoison::Mutex}; + /// use std::thread; + /// + /// let mutex = Arc::new(Mutex::new(0)); + /// let c_mutex = Arc::clone(&mutex); + /// + /// thread::spawn(move || { + /// *c_mutex.lock() = 10; + /// }).join().expect("thread::spawn failed"); + /// assert_eq!(*mutex.lock(), 10); + /// ``` + #[unstable(feature = "nonpoison_mutex", issue = "134645")] + pub fn lock(&self) -> MutexGuard<'_, T> { + unsafe { + self.inner.lock(); + MutexGuard::new(self) + } + } + + /// Attempts to acquire this lock. + /// + /// This function does not block. If the lock could not be acquired at this time, then + /// [`WouldBlock`] is returned. Otherwise, an RAII guard is returned. + /// + /// The lock will be unlocked when the guard is dropped. + /// + /// # Errors + /// + /// If the mutex could not be acquired because it is already locked, then this call will return + /// the [`WouldBlock`] error. 
+ /// + /// # Examples + /// + /// ``` + /// use std::sync::{Arc, Mutex}; + /// use std::thread; + /// + /// let mutex = Arc::new(Mutex::new(0)); + /// let c_mutex = Arc::clone(&mutex); + /// + /// thread::spawn(move || { + /// let mut lock = c_mutex.try_lock(); + /// if let Ok(ref mut mutex) = lock { + /// **mutex = 10; + /// } else { + /// println!("try_lock failed"); + /// } + /// }).join().expect("thread::spawn failed"); + /// assert_eq!(*mutex.lock().unwrap(), 10); + /// ``` + #[unstable(feature = "nonpoison_mutex", issue = "134645")] + pub fn try_lock(&self) -> TryLockResult> { + unsafe { if self.inner.try_lock() { Ok(MutexGuard::new(self)) } else { Err(WouldBlock) } } + } + + /// Consumes this mutex, returning the underlying data. + /// + /// # Examples + /// + /// ``` + /// #![feature(nonpoison_mutex)] + /// + /// use std::sync::nonpoison::Mutex; + /// + /// let mutex = Mutex::new(0); + /// assert_eq!(mutex.into_inner(), 0); + /// ``` + #[unstable(feature = "nonpoison_mutex", issue = "134645")] + pub fn into_inner(self) -> T + where + T: Sized, + { + self.data.into_inner() + } + + /// Returns a mutable reference to the underlying data. + /// + /// Since this call borrows the `Mutex` mutably, no actual locking needs to + /// take place -- the mutable borrow statically guarantees no locks exist. + /// + /// # Examples + /// + /// ``` + /// #![feature(nonpoison_mutex)] + /// + /// use std::sync::nonpoison::Mutex; + /// + /// let mut mutex = Mutex::new(0); + /// *mutex.get_mut() = 10; + /// assert_eq!(*mutex.lock(), 10); + /// ``` + #[unstable(feature = "nonpoison_mutex", issue = "134645")] + pub fn get_mut(&mut self) -> &mut T { + self.data.get_mut() + } + + /// Returns a raw pointer to the underlying data. + /// + /// The returned pointer is always non-null and properly aligned, but it is + /// the user's responsibility to ensure that any reads and writes through it + /// are properly synchronized to avoid data races, and that it is not read + /// or written through after the mutex is dropped. + #[unstable(feature = "mutex_data_ptr", issue = "140368")] + // #[unstable(feature = "nonpoison_mutex", issue = "134645")] + pub fn data_ptr(&self) -> *mut T { + self.data.get() + } +} + +#[unstable(feature = "nonpoison_mutex", issue = "134645")] +impl From for Mutex { + /// Creates a new mutex in an unlocked state ready for use. + /// This is equivalent to [`Mutex::new`]. + fn from(t: T) -> Self { + Mutex::new(t) + } +} + +#[unstable(feature = "nonpoison_mutex", issue = "134645")] +impl Default for Mutex { + /// Creates a `Mutex`, with the `Default` value for T. 
+ fn default() -> Mutex { + Mutex::new(Default::default()) + } +} + +#[unstable(feature = "nonpoison_mutex", issue = "134645")] +impl fmt::Debug for Mutex { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut d = f.debug_struct("Mutex"); + match self.try_lock() { + Ok(guard) => { + d.field("data", &&*guard); + } + Err(WouldBlock) => { + d.field("data", &""); + } + } + d.finish_non_exhaustive() + } +} + +impl<'mutex, T: ?Sized> MutexGuard<'mutex, T> { + unsafe fn new(lock: &'mutex Mutex) -> MutexGuard<'mutex, T> { + return MutexGuard { lock }; + } +} + +#[unstable(feature = "nonpoison_mutex", issue = "134645")] +impl Deref for MutexGuard<'_, T> { + type Target = T; + + fn deref(&self) -> &T { + unsafe { &*self.lock.data.get() } + } +} + +#[unstable(feature = "nonpoison_mutex", issue = "134645")] +impl DerefMut for MutexGuard<'_, T> { + fn deref_mut(&mut self) -> &mut T { + unsafe { &mut *self.lock.data.get() } + } +} + +#[unstable(feature = "nonpoison_mutex", issue = "134645")] +impl Drop for MutexGuard<'_, T> { + #[inline] + fn drop(&mut self) { + unsafe { + self.lock.inner.unlock(); + } + } +} + +#[unstable(feature = "nonpoison_mutex", issue = "134645")] +impl fmt::Debug for MutexGuard<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&**self, f) + } +} + +#[unstable(feature = "nonpoison_mutex", issue = "134645")] +impl fmt::Display for MutexGuard<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + (**self).fmt(f) + } +} + +impl<'a, T: ?Sized> MutexGuard<'a, T> { + /// Makes a [`MappedMutexGuard`] for a component of the borrowed data, e.g. + /// an enum variant. + /// + /// The `Mutex` is already locked, so this cannot fail. + /// + /// This is an associated function that needs to be used as + /// `MutexGuard::map(...)`. A method would interfere with methods of the + /// same name on the contents of the `MutexGuard` used through `Deref`. + #[unstable(feature = "mapped_lock_guards", issue = "117108")] + // #[unstable(feature = "nonpoison_mutex", issue = "134645")] + pub fn map(orig: Self, f: F) -> MappedMutexGuard<'a, U> + where + F: FnOnce(&mut T) -> &mut U, + U: ?Sized, + { + // SAFETY: the conditions of `MutexGuard::new` were satisfied when the original guard + // was created, and have been upheld throughout `map` and/or `filter_map`. + // The signature of the closure guarantees that it will not "leak" the lifetime of the reference + // passed to it. If the closure panics, the guard will be dropped. + let data = NonNull::from(f(unsafe { &mut *orig.lock.data.get() })); + let orig = ManuallyDrop::new(orig); + MappedMutexGuard { data, inner: &orig.lock.inner, _variance: PhantomData } + } + + /// Makes a [`MappedMutexGuard`] for a component of the borrowed data. The + /// original guard is returned as an `Err(...)` if the closure returns + /// `None`. + /// + /// The `Mutex` is already locked, so this cannot fail. + /// + /// This is an associated function that needs to be used as + /// `MutexGuard::filter_map(...)`. A method would interfere with methods of the + /// same name on the contents of the `MutexGuard` used through `Deref`. 
+ #[unstable(feature = "mapped_lock_guards", issue = "117108")] + // #[unstable(feature = "nonpoison_mutex", issue = "134645")] + pub fn filter_map(orig: Self, f: F) -> Result, Self> + where + F: FnOnce(&mut T) -> Option<&mut U>, + U: ?Sized, + { + // SAFETY: the conditions of `MutexGuard::new` were satisfied when the original guard + // was created, and have been upheld throughout `map` and/or `filter_map`. + // The signature of the closure guarantees that it will not "leak" the lifetime of the reference + // passed to it. If the closure panics, the guard will be dropped. + match f(unsafe { &mut *orig.lock.data.get() }) { + Some(data) => { + let data = NonNull::from(data); + let orig = ManuallyDrop::new(orig); + Ok(MappedMutexGuard { data, inner: &orig.lock.inner, _variance: PhantomData }) + } + None => Err(orig), + } + } +} + +#[unstable(feature = "mapped_lock_guards", issue = "117108")] +impl Deref for MappedMutexGuard<'_, T> { + type Target = T; + + fn deref(&self) -> &T { + unsafe { self.data.as_ref() } + } +} + +#[unstable(feature = "mapped_lock_guards", issue = "117108")] +impl DerefMut for MappedMutexGuard<'_, T> { + fn deref_mut(&mut self) -> &mut T { + unsafe { self.data.as_mut() } + } +} + +#[unstable(feature = "mapped_lock_guards", issue = "117108")] +impl Drop for MappedMutexGuard<'_, T> { + #[inline] + fn drop(&mut self) { + unsafe { + self.inner.unlock(); + } + } +} + +#[unstable(feature = "mapped_lock_guards", issue = "117108")] +impl fmt::Debug for MappedMutexGuard<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&**self, f) + } +} + +#[unstable(feature = "mapped_lock_guards", issue = "117108")] +impl fmt::Display for MappedMutexGuard<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + (**self).fmt(f) + } +} + +impl<'a, T: ?Sized> MappedMutexGuard<'a, T> { + /// Makes a [`MappedMutexGuard`] for a component of the borrowed data, e.g. + /// an enum variant. + /// + /// The `Mutex` is already locked, so this cannot fail. + /// + /// This is an associated function that needs to be used as + /// `MappedMutexGuard::map(...)`. A method would interfere with methods of the + /// same name on the contents of the `MutexGuard` used through `Deref`. + #[unstable(feature = "mapped_lock_guards", issue = "117108")] + // #[unstable(feature = "nonpoison_mutex", issue = "134645")] + pub fn map(mut orig: Self, f: F) -> MappedMutexGuard<'a, U> + where + F: FnOnce(&mut T) -> &mut U, + U: ?Sized, + { + // SAFETY: the conditions of `MutexGuard::new` were satisfied when the original guard + // was created, and have been upheld throughout `map` and/or `filter_map`. + // The signature of the closure guarantees that it will not "leak" the lifetime of the reference + // passed to it. If the closure panics, the guard will be dropped. + let data = NonNull::from(f(unsafe { orig.data.as_mut() })); + let orig = ManuallyDrop::new(orig); + MappedMutexGuard { data, inner: orig.inner, _variance: PhantomData } + } + + /// Makes a [`MappedMutexGuard`] for a component of the borrowed data. The + /// original guard is returned as an `Err(...)` if the closure returns + /// `None`. + /// + /// The `Mutex` is already locked, so this cannot fail. + /// + /// This is an associated function that needs to be used as + /// `MappedMutexGuard::filter_map(...)`. A method would interfere with methods of the + /// same name on the contents of the `MutexGuard` used through `Deref`. 
+ #[unstable(feature = "mapped_lock_guards", issue = "117108")] + // #[unstable(feature = "nonpoison_mutex", issue = "134645")] + pub fn filter_map(mut orig: Self, f: F) -> Result, Self> + where + F: FnOnce(&mut T) -> Option<&mut U>, + U: ?Sized, + { + // SAFETY: the conditions of `MutexGuard::new` were satisfied when the original guard + // was created, and have been upheld throughout `map` and/or `filter_map`. + // The signature of the closure guarantees that it will not "leak" the lifetime of the reference + // passed to it. If the closure panics, the guard will be dropped. + match f(unsafe { orig.data.as_mut() }) { + Some(data) => { + let data = NonNull::from(data); + let orig = ManuallyDrop::new(orig); + Ok(MappedMutexGuard { data, inner: orig.inner, _variance: PhantomData }) + } + None => Err(orig), + } + } +} diff --git a/library/std/src/sync/poison.rs b/library/std/src/sync/poison.rs index 0c05f152ef84c..b901a5701a488 100644 --- a/library/std/src/sync/poison.rs +++ b/library/std/src/sync/poison.rs @@ -13,7 +13,9 @@ //! depend on the primitive. See [#Overview] below. //! //! For the alternative implementations that do not employ poisoning, -//! see `std::sync::nonpoisoning`. +//! see [`std::sync::nonpoison`]. +//! +//! [`std::sync::nonpoison`]: crate::sync::nonpoison //! //! # Overview //! @@ -56,8 +58,6 @@ //! while it is locked exclusively (write mode). If a panic occurs in any reader, //! then the lock will not be poisoned. -// FIXME(sync_nonpoison) add links to sync::nonpoison to the doc comment above. - #[stable(feature = "rust1", since = "1.0.0")] pub use self::condvar::{Condvar, WaitTimeoutResult}; #[unstable(feature = "mapped_lock_guards", issue = "117108")] diff --git a/library/std/src/sync/poison/condvar.rs b/library/std/src/sync/poison/condvar.rs index 7f0f3f652bcb7..0e9d4233c6577 100644 --- a/library/std/src/sync/poison/condvar.rs +++ b/library/std/src/sync/poison/condvar.rs @@ -13,7 +13,7 @@ use crate::time::{Duration, Instant}; #[stable(feature = "wait_timeout", since = "1.5.0")] pub struct WaitTimeoutResult(bool); -// FIXME(sync_nonpoison): `WaitTimeoutResult` is actually poisoning-agnostic, it seems. +// FIXME(nonpoison_condvar): `WaitTimeoutResult` is actually poisoning-agnostic, it seems. // Should we take advantage of this fact? impl WaitTimeoutResult { /// Returns `true` if the wait was known to have timed out. 
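For context on how the new `nonpoison` API above is meant to be consumed: `lock()` hands back the guard directly (there is no poisoning `LockResult` to unwrap), and `try_lock()` reports contention with the unit-like `WouldBlock` error instead of a `TryLockError`. A minimal sketch of calling code, assuming the unstable `sync_nonpoison` and `nonpoison_mutex` features introduced in this patch (nightly-only, and only once this series lands):

```rust
#![feature(sync_nonpoison)]
#![feature(nonpoison_mutex)]

use std::sync::nonpoison::{Mutex, WouldBlock};

fn main() {
    let m = Mutex::new(0u32);

    // `lock` returns the guard directly; no poisoning `Result` to unwrap.
    let mut guard = m.lock();
    *guard += 1;

    // While the guard is alive, `try_lock` reports contention with the unit
    // `WouldBlock` error rather than a `TryLockError`.
    if let Err(WouldBlock) = m.try_lock() {
        println!("lock is currently held");
    }

    drop(guard);
    assert_eq!(*m.lock(), 1);
}
```

Because the nonpoison types mirror the poison ones apart from their return types, switching a call site between the two flavors only means changing the import path and dropping the `unwrap` calls, which is what the dual-variant test macro further down in this patch relies on.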
diff --git a/library/std/src/sync/poison/mutex.rs b/library/std/src/sync/poison/mutex.rs index 30325be685c32..64744f18c746e 100644 --- a/library/std/src/sync/poison/mutex.rs +++ b/library/std/src/sync/poison/mutex.rs @@ -650,7 +650,7 @@ impl fmt::Debug for Mutex { d.field("data", &&**err.get_ref()); } Err(TryLockError::WouldBlock) => { - d.field("data", &format_args!("")); + d.field("data", &""); } } d.field("poisoned", &self.poison.get()); diff --git a/library/std/src/sys/net/connection/uefi/mod.rs b/library/std/src/sys/net/connection/uefi/mod.rs index 884cbd4ac1dc7..16e3487a1747f 100644 --- a/library/std/src/sys/net/connection/uefi/mod.rs +++ b/library/std/src/sys/net/connection/uefi/mod.rs @@ -86,11 +86,11 @@ impl TcpStream { } pub fn peer_addr(&self) -> io::Result { - unsupported() + self.inner.peer_addr() } pub fn socket_addr(&self) -> io::Result { - unsupported() + self.inner.socket_addr() } pub fn shutdown(&self, _: Shutdown) -> io::Result<()> { @@ -114,7 +114,7 @@ impl TcpStream { } pub fn nodelay(&self) -> io::Result { - unsupported() + self.inner.nodelay() } pub fn set_ttl(&self, _: u32) -> io::Result<()> { @@ -122,7 +122,7 @@ impl TcpStream { } pub fn ttl(&self) -> io::Result { - unsupported() + self.inner.ttl() } pub fn take_error(&self) -> io::Result> { @@ -140,7 +140,9 @@ impl fmt::Debug for TcpStream { } } -pub struct TcpListener(!); +pub struct TcpListener { + inner: tcp::Tcp, +} impl TcpListener { pub fn bind(_: io::Result<&SocketAddr>) -> io::Result { @@ -148,45 +150,45 @@ impl TcpListener { } pub fn socket_addr(&self) -> io::Result { - self.0 + unsupported() } pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> { - self.0 + unsupported() } pub fn duplicate(&self) -> io::Result { - self.0 + unsupported() } pub fn set_ttl(&self, _: u32) -> io::Result<()> { - self.0 + unsupported() } pub fn ttl(&self) -> io::Result { - self.0 + self.inner.ttl() } pub fn set_only_v6(&self, _: bool) -> io::Result<()> { - self.0 + unsupported() } pub fn only_v6(&self) -> io::Result { - self.0 + unsupported() } pub fn take_error(&self) -> io::Result> { - self.0 + unsupported() } pub fn set_nonblocking(&self, _: bool) -> io::Result<()> { - self.0 + unsupported() } } impl fmt::Debug for TcpListener { fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.0 + todo!() } } diff --git a/library/std/src/sys/net/connection/uefi/tcp.rs b/library/std/src/sys/net/connection/uefi/tcp.rs index 1152f69446e42..aac97007bbfe5 100644 --- a/library/std/src/sys/net/connection/uefi/tcp.rs +++ b/library/std/src/sys/net/connection/uefi/tcp.rs @@ -1,6 +1,8 @@ use super::tcp4; use crate::io; use crate::net::SocketAddr; +use crate::ptr::NonNull; +use crate::sys::{helpers, unsupported}; use crate::time::Duration; pub(crate) enum Tcp { @@ -31,4 +33,44 @@ impl Tcp { Self::V4(client) => client.read(buf, timeout), } } + + pub(crate) fn ttl(&self) -> io::Result { + match self { + Self::V4(client) => client.get_mode_data().map(|x| x.time_to_live.into()), + } + } + + pub(crate) fn nodelay(&self) -> io::Result { + match self { + Self::V4(client) => { + let temp = client.get_mode_data()?; + match NonNull::new(temp.control_option) { + Some(x) => unsafe { Ok(x.as_ref().enable_nagle.into()) }, + None => unsupported(), + } + } + } + } + + pub fn peer_addr(&self) -> io::Result { + match self { + Self::V4(client) => client.get_mode_data().map(|x| { + SocketAddr::new( + helpers::ipv4_from_r_efi(x.access_point.remote_address).into(), + x.access_point.remote_port, + ) + }), + } + } + + pub fn socket_addr(&self) -> 
io::Result { + match self { + Self::V4(client) => client.get_mode_data().map(|x| { + SocketAddr::new( + helpers::ipv4_from_r_efi(x.access_point.station_address).into(), + x.access_point.station_port, + ) + }), + } + } } diff --git a/library/std/src/sys/net/connection/uefi/tcp4.rs b/library/std/src/sys/net/connection/uefi/tcp4.rs index 6342718929a7d..75862ff247b4f 100644 --- a/library/std/src/sys/net/connection/uefi/tcp4.rs +++ b/library/std/src/sys/net/connection/uefi/tcp4.rs @@ -67,6 +67,24 @@ impl Tcp4 { if r.is_error() { Err(crate::io::Error::from_raw_os_error(r.as_usize())) } else { Ok(()) } } + pub(crate) fn get_mode_data(&self) -> io::Result { + let mut config_data = tcp4::ConfigData::default(); + let protocol = self.protocol.as_ptr(); + + let r = unsafe { + ((*protocol).get_mode_data)( + protocol, + crate::ptr::null_mut(), + &mut config_data, + crate::ptr::null_mut(), + crate::ptr::null_mut(), + crate::ptr::null_mut(), + ) + }; + + if r.is_error() { Err(io::Error::from_raw_os_error(r.as_usize())) } else { Ok(config_data) } + } + pub(crate) fn connect(&self, timeout: Option) -> io::Result<()> { let evt = unsafe { self.create_evt() }?; let completion_token = diff --git a/library/std/src/sys/pal/hermit/thread.rs b/library/std/src/sys/pal/hermit/thread.rs index 9bc5a16b80023..95fe4f902d338 100644 --- a/library/std/src/sys/pal/hermit/thread.rs +++ b/library/std/src/sys/pal/hermit/thread.rs @@ -58,7 +58,11 @@ impl Thread { } } - pub unsafe fn new(stack: usize, p: Box) -> io::Result { + pub unsafe fn new( + stack: usize, + _name: Option<&str>, + p: Box, + ) -> io::Result { unsafe { Thread::new_with_coreid(stack, p, -1 /* = no specific core */) } diff --git a/library/std/src/sys/pal/itron/thread.rs b/library/std/src/sys/pal/itron/thread.rs index 813e1cbcd58fb..0d28051fcc46e 100644 --- a/library/std/src/sys/pal/itron/thread.rs +++ b/library/std/src/sys/pal/itron/thread.rs @@ -86,7 +86,11 @@ impl Thread { /// # Safety /// /// See `thread::Builder::spawn_unchecked` for safety requirements. - pub unsafe fn new(stack: usize, p: Box) -> io::Result { + pub unsafe fn new( + stack: usize, + _name: Option<&str>, + p: Box, + ) -> io::Result { let inner = Box::new(ThreadInner { start: UnsafeCell::new(ManuallyDrop::new(p)), lifecycle: AtomicUsize::new(LIFECYCLE_INIT), diff --git a/library/std/src/sys/pal/sgx/abi/usercalls/mod.rs b/library/std/src/sys/pal/sgx/abi/usercalls/mod.rs index dea44124f458b..5041770faf661 100644 --- a/library/std/src/sys/pal/sgx/abi/usercalls/mod.rs +++ b/library/std/src/sys/pal/sgx/abi/usercalls/mod.rs @@ -267,7 +267,7 @@ pub fn send(event_set: u64, tcs: Option) -> IoResult<()> { /// Usercall `insecure_time`. See the ABI documentation for more information. #[unstable(feature = "sgx_platform", issue = "56975")] pub fn insecure_time() -> Duration { - let t = unsafe { raw::insecure_time() }; + let t = unsafe { raw::insecure_time().0 }; Duration::new(t / 1_000_000_000, (t % 1_000_000_000) as _) } diff --git a/library/std/src/sys/pal/sgx/thread.rs b/library/std/src/sys/pal/sgx/thread.rs index 85f6dcd96b4a5..a236c3627060f 100644 --- a/library/std/src/sys/pal/sgx/thread.rs +++ b/library/std/src/sys/pal/sgx/thread.rs @@ -96,7 +96,11 @@ pub mod wait_notify { impl Thread { // unsafe: see thread::Builder::spawn_unchecked for safety requirements - pub unsafe fn new(_stack: usize, p: Box) -> io::Result { + pub unsafe fn new( + _stack: usize, + _name: Option<&str>, + p: Box, + ) -> io::Result { let mut queue_lock = task_queue::lock(); unsafe { usercalls::launch_thread()? 
}; let (task, handle) = task_queue::Task::new(p); diff --git a/library/std/src/sys/pal/teeos/thread.rs b/library/std/src/sys/pal/teeos/thread.rs index b9cdc7a2a58bb..a91d95626e703 100644 --- a/library/std/src/sys/pal/teeos/thread.rs +++ b/library/std/src/sys/pal/teeos/thread.rs @@ -22,7 +22,11 @@ unsafe extern "C" { impl Thread { // unsafe: see thread::Builder::spawn_unchecked for safety requirements - pub unsafe fn new(stack: usize, p: Box) -> io::Result { + pub unsafe fn new( + stack: usize, + _name: Option<&str>, + p: Box, + ) -> io::Result { let p = Box::into_raw(Box::new(p)); let mut native: libc::pthread_t = unsafe { mem::zeroed() }; let mut attr: libc::pthread_attr_t = unsafe { mem::zeroed() }; diff --git a/library/std/src/sys/pal/uefi/helpers.rs b/library/std/src/sys/pal/uefi/helpers.rs index 420481648a7d5..271dc4d11de75 100644 --- a/library/std/src/sys/pal/uefi/helpers.rs +++ b/library/std/src/sys/pal/uefi/helpers.rs @@ -761,3 +761,7 @@ impl Drop for OwnedEvent { pub(crate) const fn ipv4_to_r_efi(addr: crate::net::Ipv4Addr) -> efi::Ipv4Address { efi::Ipv4Address { addr: addr.octets() } } + +pub(crate) const fn ipv4_from_r_efi(ip: efi::Ipv4Address) -> crate::net::Ipv4Addr { + crate::net::Ipv4Addr::new(ip.addr[0], ip.addr[1], ip.addr[2], ip.addr[3]) +} diff --git a/library/std/src/sys/pal/uefi/thread.rs b/library/std/src/sys/pal/uefi/thread.rs index e4776ec42fbba..75c364362b2d8 100644 --- a/library/std/src/sys/pal/uefi/thread.rs +++ b/library/std/src/sys/pal/uefi/thread.rs @@ -11,7 +11,11 @@ pub const DEFAULT_MIN_STACK_SIZE: usize = 64 * 1024; impl Thread { // unsafe: see thread::Builder::spawn_unchecked for safety requirements - pub unsafe fn new(_stack: usize, _p: Box) -> io::Result { + pub unsafe fn new( + _stack: usize, + _name: Option<&str>, + _p: Box, + ) -> io::Result { unsupported() } diff --git a/library/std/src/sys/pal/unix/os.rs b/library/std/src/sys/pal/unix/os.rs index 850bdfdf5b54c..0e68313cc3ed0 100644 --- a/library/std/src/sys/pal/unix/os.rs +++ b/library/std/src/sys/pal/unix/os.rs @@ -633,7 +633,10 @@ pub fn temp_dir() -> PathBuf { } pub fn home_dir() -> Option { - return crate::env::var_os("HOME").or_else(|| unsafe { fallback() }).map(PathBuf::from); + return crate::env::var_os("HOME") + .filter(|s| !s.is_empty()) + .or_else(|| unsafe { fallback() }) + .map(PathBuf::from); #[cfg(any( target_os = "android", diff --git a/library/std/src/sys/pal/unix/stack_overflow.rs b/library/std/src/sys/pal/unix/stack_overflow.rs index a3be2cdf738f5..d89100e69196a 100644 --- a/library/std/src/sys/pal/unix/stack_overflow.rs +++ b/library/std/src/sys/pal/unix/stack_overflow.rs @@ -8,8 +8,8 @@ pub struct Handler { } impl Handler { - pub unsafe fn new() -> Handler { - make_handler(false) + pub unsafe fn new(thread_name: Option>) -> Handler { + make_handler(false, thread_name) } fn null() -> Handler { @@ -72,7 +72,6 @@ mod imp { use crate::sync::OnceLock; use crate::sync::atomic::{Atomic, AtomicBool, AtomicPtr, AtomicUsize, Ordering}; use crate::sys::pal::unix::os; - use crate::thread::with_current_name; use crate::{io, mem, panic, ptr}; // Signal handler for the SIGSEGV and SIGBUS handlers. 
We've got guard pages @@ -158,13 +157,12 @@ mod imp { if !NEED_ALTSTACK.load(Ordering::Relaxed) { // haven't set up our sigaltstack yet NEED_ALTSTACK.store(true, Ordering::Release); - let handler = unsafe { make_handler(true) }; + let handler = unsafe { make_handler(true, None) }; MAIN_ALTSTACK.store(handler.data, Ordering::Relaxed); mem::forget(handler); if let Some(guard_page_range) = guard_page_range.take() { - let thread_name = with_current_name(|name| name.map(Box::from)); - set_current_info(guard_page_range, thread_name); + set_current_info(guard_page_range, Some(Box::from("main"))); } } @@ -230,14 +228,13 @@ mod imp { /// # Safety /// Mutates the alternate signal stack #[forbid(unsafe_op_in_unsafe_fn)] - pub unsafe fn make_handler(main_thread: bool) -> Handler { + pub unsafe fn make_handler(main_thread: bool, thread_name: Option>) -> Handler { if !NEED_ALTSTACK.load(Ordering::Acquire) { return Handler::null(); } if !main_thread { if let Some(guard_page_range) = unsafe { current_guard() } { - let thread_name = with_current_name(|name| name.map(Box::from)); set_current_info(guard_page_range, thread_name); } } @@ -634,7 +631,10 @@ mod imp { pub unsafe fn cleanup() {} - pub unsafe fn make_handler(_main_thread: bool) -> super::Handler { + pub unsafe fn make_handler( + _main_thread: bool, + _thread_name: Option>, + ) -> super::Handler { super::Handler::null() } @@ -717,7 +717,10 @@ mod imp { pub unsafe fn cleanup() {} - pub unsafe fn make_handler(main_thread: bool) -> super::Handler { + pub unsafe fn make_handler( + main_thread: bool, + _thread_name: Option>, + ) -> super::Handler { if !main_thread { reserve_stack(); } diff --git a/library/std/src/sys/pal/unix/thread.rs b/library/std/src/sys/pal/unix/thread.rs index e4f5520d8a33e..7f6440152d4bc 100644 --- a/library/std/src/sys/pal/unix/thread.rs +++ b/library/std/src/sys/pal/unix/thread.rs @@ -22,6 +22,11 @@ pub const DEFAULT_MIN_STACK_SIZE: usize = 256 * 1024; #[cfg(any(target_os = "espidf", target_os = "nuttx"))] pub const DEFAULT_MIN_STACK_SIZE: usize = 0; // 0 indicates that the stack size configured in the ESP-IDF/NuttX menuconfig system should be used +struct ThreadData { + name: Option>, + f: Box, +} + pub struct Thread { id: libc::pthread_t, } @@ -34,8 +39,12 @@ unsafe impl Sync for Thread {} impl Thread { // unsafe: see thread::Builder::spawn_unchecked for safety requirements #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces - pub unsafe fn new(stack: usize, p: Box) -> io::Result { - let p = Box::into_raw(Box::new(p)); + pub unsafe fn new( + stack: usize, + name: Option<&str>, + f: Box, + ) -> io::Result { + let data = Box::into_raw(Box::new(ThreadData { name: name.map(Box::from), f })); let mut native: libc::pthread_t = mem::zeroed(); let mut attr: mem::MaybeUninit = mem::MaybeUninit::uninit(); assert_eq!(libc::pthread_attr_init(attr.as_mut_ptr()), 0); @@ -73,7 +82,7 @@ impl Thread { }; } - let ret = libc::pthread_create(&mut native, attr.as_ptr(), thread_start, p as *mut _); + let ret = libc::pthread_create(&mut native, attr.as_ptr(), thread_start, data as *mut _); // Note: if the thread creation fails and this assert fails, then p will // be leaked. However, an alternative design could cause double-free // which is clearly worse. @@ -82,19 +91,20 @@ impl Thread { return if ret != 0 { // The thread failed to start and as a result p was not consumed. Therefore, it is // safe to reconstruct the box so that it gets deallocated. 
- drop(Box::from_raw(p)); + drop(Box::from_raw(data)); Err(io::Error::from_raw_os_error(ret)) } else { Ok(Thread { id: native }) }; - extern "C" fn thread_start(main: *mut libc::c_void) -> *mut libc::c_void { + extern "C" fn thread_start(data: *mut libc::c_void) -> *mut libc::c_void { unsafe { + let data = Box::from_raw(data as *mut ThreadData); // Next, set up our stack overflow handler which may get triggered if we run // out of stack. - let _handler = stack_overflow::Handler::new(); + let _handler = stack_overflow::Handler::new(data.name); // Finally, let's run some code. - Box::from_raw(main as *mut Box)(); + (data.f)(); } ptr::null_mut() } diff --git a/library/std/src/sys/pal/unsupported/thread.rs b/library/std/src/sys/pal/unsupported/thread.rs index 8a3119fa292d1..5a1e3fde98606 100644 --- a/library/std/src/sys/pal/unsupported/thread.rs +++ b/library/std/src/sys/pal/unsupported/thread.rs @@ -10,7 +10,11 @@ pub const DEFAULT_MIN_STACK_SIZE: usize = 64 * 1024; impl Thread { // unsafe: see thread::Builder::spawn_unchecked for safety requirements - pub unsafe fn new(_stack: usize, _p: Box) -> io::Result { + pub unsafe fn new( + _stack: usize, + _name: Option<&str>, + _p: Box, + ) -> io::Result { unsupported() } diff --git a/library/std/src/sys/pal/wasi/thread.rs b/library/std/src/sys/pal/wasi/thread.rs index 5f21a553673a3..a46c74630c97a 100644 --- a/library/std/src/sys/pal/wasi/thread.rs +++ b/library/std/src/sys/pal/wasi/thread.rs @@ -73,7 +73,7 @@ impl Thread { // unsafe: see thread::Builder::spawn_unchecked for safety requirements cfg_if::cfg_if! { if #[cfg(target_feature = "atomics")] { - pub unsafe fn new(stack: usize, p: Box) -> io::Result { + pub unsafe fn new(stack: usize, _name: Option<&str>, p: Box) -> io::Result { let p = Box::into_raw(Box::new(p)); let mut native: libc::pthread_t = unsafe { mem::zeroed() }; let mut attr: libc::pthread_attr_t = unsafe { mem::zeroed() }; @@ -120,7 +120,7 @@ impl Thread { } } } else { - pub unsafe fn new(_stack: usize, _p: Box) -> io::Result { + pub unsafe fn new(_stack: usize, _name: Option<&str>, _p: Box) -> io::Result { crate::sys::unsupported() } } diff --git a/library/std/src/sys/pal/wasm/atomics/thread.rs b/library/std/src/sys/pal/wasm/atomics/thread.rs index 44ce3eab109f4..ebfabaafc794d 100644 --- a/library/std/src/sys/pal/wasm/atomics/thread.rs +++ b/library/std/src/sys/pal/wasm/atomics/thread.rs @@ -10,7 +10,11 @@ pub const DEFAULT_MIN_STACK_SIZE: usize = 1024 * 1024; impl Thread { // unsafe: see thread::Builder::spawn_unchecked for safety requirements - pub unsafe fn new(_stack: usize, _p: Box) -> io::Result { + pub unsafe fn new( + _stack: usize, + _name: Option<&str>, + _p: Box, + ) -> io::Result { unsupported() } diff --git a/library/std/src/sys/pal/windows/thread.rs b/library/std/src/sys/pal/windows/thread.rs index 147851717553a..b45f76fb546f0 100644 --- a/library/std/src/sys/pal/windows/thread.rs +++ b/library/std/src/sys/pal/windows/thread.rs @@ -20,7 +20,11 @@ pub struct Thread { impl Thread { // unsafe: see thread::Builder::spawn_unchecked for safety requirements #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces - pub unsafe fn new(stack: usize, p: Box) -> io::Result { + pub unsafe fn new( + stack: usize, + _name: Option<&str>, + p: Box, + ) -> io::Result { let p = Box::into_raw(Box::new(p)); // CreateThread rounds up values for the stack size to the nearest page size (at least 4kb). 
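All of the `Thread::new` signature changes above follow the same shape: the platform backends now receive the thread name up front, and the Unix backend boxes the name together with the start closure so that the C-ABI entry point can recover both from the single `*mut c_void` it is handed. A self-contained sketch of that hand-off pattern is below; the `ThreadData` and `thread_start` names mirror the patch, but the example simulates the entry-point call directly rather than actually going through `pthread_create`:

```rust
use std::ffi::c_void;

/// Payload given to the raw thread entry point: the thread's name travels with
/// the closure so the entry point can register it (e.g. for the stack-overflow
/// handler) before running user code.
struct ThreadData {
    name: Option<Box<str>>,
    f: Box<dyn FnOnce() + Send>,
}

// Stand-in for the C-ABI entry point that `pthread_create` (or `CreateThread`)
// would invoke with the opaque pointer.
extern "C" fn thread_start(data: *mut c_void) -> *mut c_void {
    // SAFETY: `data` was produced by `Box::into_raw` below and is consumed
    // exactly once, so reconstructing the `Box` here is sound.
    let data = unsafe { Box::from_raw(data as *mut ThreadData) };
    if let Some(name) = &data.name {
        println!("starting thread {name}");
    }
    (data.f)();
    std::ptr::null_mut()
}

fn main() {
    let data = Box::into_raw(Box::new(ThreadData {
        name: Some(Box::from("worker-1")),
        f: Box::new(|| println!("hello from the spawned closure")),
    }));
    // A real backend would pass `data as *mut c_void` to the OS thread API;
    // calling the entry point directly keeps the sketch self-contained.
    thread_start(data as *mut c_void);
}
```

Carrying the name inside the payload is what lets `stack_overflow::Handler::new` report which thread overflowed without consulting thread-local state that may not be initialized yet, which is why the `with_current_name` lookups are removed in the hunks above.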
diff --git a/library/std/src/sys/pal/xous/thread.rs b/library/std/src/sys/pal/xous/thread.rs index 1b344e984dc36..f2404a62abf97 100644 --- a/library/std/src/sys/pal/xous/thread.rs +++ b/library/std/src/sys/pal/xous/thread.rs @@ -20,7 +20,11 @@ pub const GUARD_PAGE_SIZE: usize = 4096; impl Thread { // unsafe: see thread::Builder::spawn_unchecked for safety requirements - pub unsafe fn new(stack: usize, p: Box) -> io::Result { + pub unsafe fn new( + stack: usize, + _name: Option<&str>, + p: Box, + ) -> io::Result { let p = Box::into_raw(Box::new(p)); let mut stack_size = crate::cmp::max(stack, MIN_STACK_SIZE); diff --git a/library/std/src/sys/random/sgx.rs b/library/std/src/sys/random/sgx.rs index c3647a8df220e..462b19003fad2 100644 --- a/library/std/src/sys/random/sgx.rs +++ b/library/std/src/sys/random/sgx.rs @@ -46,22 +46,22 @@ fn rdrand16() -> u16 { } pub fn fill_bytes(bytes: &mut [u8]) { - let mut chunks = bytes.array_chunks_mut(); - for chunk in &mut chunks { + let (chunks, remainder) = bytes.as_chunks_mut(); + for chunk in chunks { *chunk = rdrand64().to_ne_bytes(); } - let mut chunks = chunks.into_remainder().array_chunks_mut(); - for chunk in &mut chunks { + let (chunks, remainder) = remainder.as_chunks_mut(); + for chunk in chunks { *chunk = rdrand32().to_ne_bytes(); } - let mut chunks = chunks.into_remainder().array_chunks_mut(); - for chunk in &mut chunks { + let (chunks, remainder) = remainder.as_chunks_mut(); + for chunk in chunks { *chunk = rdrand16().to_ne_bytes(); } - if let [byte] = chunks.into_remainder() { + if let [byte] = remainder { *byte = rdrand16() as u8; } } diff --git a/library/std/src/sys/random/uefi.rs b/library/std/src/sys/random/uefi.rs index 5f001f0f532a0..4a71d32fffeb4 100644 --- a/library/std/src/sys/random/uefi.rs +++ b/library/std/src/sys/random/uefi.rs @@ -138,12 +138,11 @@ mod rdrand { } unsafe fn rdrand_exact(dest: &mut [u8]) -> Option<()> { - let mut chunks = dest.array_chunks_mut(); - for chunk in &mut chunks { + let (chunks, tail) = dest.as_chunks_mut(); + for chunk in chunks { *chunk = unsafe { rdrand() }?.to_ne_bytes(); } - let tail = chunks.into_remainder(); let n = tail.len(); if n > 0 { let src = unsafe { rdrand() }?.to_ne_bytes(); diff --git a/library/std/src/sys/thread_local/guard/windows.rs b/library/std/src/sys/thread_local/guard/windows.rs index b15a0d7c0bdfb..f747129465d6d 100644 --- a/library/std/src/sys/thread_local/guard/windows.rs +++ b/library/std/src/sys/thread_local/guard/windows.rs @@ -58,7 +58,7 @@ //! We don't actually use the `/INCLUDE` linker flag here like the article //! mentions because the Rust compiler doesn't propagate linker flags, but //! instead we use a shim function which performs a volatile 1-byte load from -//! the address of the symbol to ensure it sticks around. +//! the address of the _tls_used symbol to ensure it sticks around. //! //! [1]: https://www.codeproject.com/Articles/8113/Thread-Local-Storage-The-C-Way //! [2]: https://github.com/ChromiumWebApps/chromium/blob/master/base/threading/thread_local_storage_win.cc#L42 @@ -68,9 +68,20 @@ use core::ffi::c_void; use crate::ptr; use crate::sys::c; +unsafe extern "C" { + #[link_name = "_tls_used"] + static TLS_USED: u8; +} pub fn enable() { - // When destructors are used, we don't want LLVM eliminating CALLBACK for any - // reason. Once the symbol makes it to the linker, it will do the rest. 
+ // When destructors are used, we need to add a reference to the _tls_used + // symbol provided by the CRT, otherwise the TLS support code will get + // GC'd by the linker and our callback won't be called. + unsafe { ptr::from_ref(&TLS_USED).read_volatile() }; + // We also need to reference CALLBACK to make sure it does not get GC'd + // by the compiler/LLVM. The callback will end up inside the TLS + // callback array pointed to by _TLS_USED through linker shenanigans, + // but as far as the compiler is concerned, it looks like the data is + // unused, so we need this hack to prevent it from disappearing. unsafe { ptr::from_ref(&CALLBACK).read_volatile() }; } diff --git a/library/std/src/thread/mod.rs b/library/std/src/thread/mod.rs index 6075173db47f4..dff981c900c08 100644 --- a/library/std/src/thread/mod.rs +++ b/library/std/src/thread/mod.rs @@ -595,7 +595,7 @@ impl Builder { // Similarly, the `sys` implementation must guarantee that no references to the closure // exist after the thread has terminated, which is signaled by `Thread::join` // returning. - native: unsafe { imp::Thread::new(stack_size, main)? }, + native: unsafe { imp::Thread::new(stack_size, my_thread.name(), main)? }, thread: my_thread, packet: my_packet, }) @@ -1399,6 +1399,11 @@ where } /// The internal representation of a `Thread` handle +/// +/// We explicitly set the alignment for our guarantee in Thread::into_raw. This +/// allows applications to stuff extra metadata bits into the alignment, which +/// can be rather useful when working with atomics. +#[repr(align(8))] struct Inner { name: Option, id: ThreadId, @@ -1582,7 +1587,8 @@ impl Thread { /// Consumes the `Thread`, returning a raw pointer. /// /// To avoid a memory leak the pointer must be converted - /// back into a `Thread` using [`Thread::from_raw`]. + /// back into a `Thread` using [`Thread::from_raw`]. The pointer is + /// guaranteed to be aligned to at least 8 bytes. /// /// # Examples /// diff --git a/library/std/tests/sync/lib.rs b/library/std/tests/sync/lib.rs index 51190f0894fb7..94f1fe96b6a26 100644 --- a/library/std/tests/sync/lib.rs +++ b/library/std/tests/sync/lib.rs @@ -6,7 +6,10 @@ #![feature(reentrant_lock)] #![feature(rwlock_downgrade)] #![feature(std_internals)] +#![feature(sync_nonpoison)] +#![feature(nonpoison_mutex)] #![allow(internal_features)] +#![feature(macro_metavar_expr_concat)] // For concatenating identifiers in macros. mod barrier; mod condvar; @@ -29,3 +32,55 @@ mod rwlock; #[path = "../common/mod.rs"] mod common; + +#[track_caller] +fn result_unwrap(x: Result) -> T { + x.unwrap() +} + +/// A macro that generates two test cases for both the poison and nonpoison locks. +/// +/// To write a test that tests both `poison` and `nonpoison` locks, import any of the types +/// under both `poison` and `nonpoison` using the module name `locks` instead. For example, write +/// `use locks::Mutex;` instead of `use std::sync::poiosn::Mutex`. This will import the correct type +/// for each test variant. +/// +/// Write a test as normal in the `test_body`, but instead of calling `unwrap` on `poison` methods +/// that return a `LockResult` or similar, call the function `maybe_unwrap(...)` on the result. +/// +/// For example, call `maybe_unwrap(mutex.lock())` instead of `mutex.lock().unwrap()` or +/// `maybe_unwrap(rwlock.read())` instead of `rwlock.read().unwrap()`. +/// +/// For the `poison` types, `maybe_unwrap` will simply unwrap the `Result` (usually this is a form +/// of `LockResult`, but it could also be other kinds of results). 
For the `nonpoison` types, it is +/// a no-op (the identity function). +/// +/// The test names will be prefiex with `poison_` or `nonpoison_`. +macro_rules! nonpoison_and_poison_unwrap_test { + ( + name: $name:ident, + test_body: {$($test_body:tt)*} + ) => { + // Creates the nonpoison test. + #[test] + fn ${concat(nonpoison_, $name)}() { + #[allow(unused_imports)] + use ::std::convert::identity as maybe_unwrap; + use ::std::sync::nonpoison as locks; + + $($test_body)* + } + + // Creates the poison test with the suffix `_unwrap_poisoned`. + #[test] + fn ${concat(poison_, $name)}() { + #[allow(unused_imports)] + use super::result_unwrap as maybe_unwrap; + use ::std::sync::poison as locks; + + $($test_body)* + } + } +} + +use nonpoison_and_poison_unwrap_test; diff --git a/library/std/tests/sync/mutex.rs b/library/std/tests/sync/mutex.rs index ac82914d6de46..90cefc0d59466 100644 --- a/library/std/tests/sync/mutex.rs +++ b/library/std/tests/sync/mutex.rs @@ -6,7 +6,71 @@ use std::sync::mpsc::channel; use std::sync::{Arc, Condvar, MappedMutexGuard, Mutex, MutexGuard, TryLockError}; use std::{hint, mem, thread}; -struct Packet(Arc<(Mutex, Condvar)>); +//////////////////////////////////////////////////////////////////////////////////////////////////// +// Nonpoison & Poison Tests +//////////////////////////////////////////////////////////////////////////////////////////////////// +use super::nonpoison_and_poison_unwrap_test; + +nonpoison_and_poison_unwrap_test!( + name: smoke, + test_body: { + use locks::Mutex; + + let m = Mutex::new(()); + drop(maybe_unwrap(m.lock())); + drop(maybe_unwrap(m.lock())); + } +); + +nonpoison_and_poison_unwrap_test!( + name: lots_and_lots, + test_body: { + use locks::Mutex; + + const J: u32 = 1000; + const K: u32 = 3; + + let m = Arc::new(Mutex::new(0)); + + fn inc(m: &Mutex) { + for _ in 0..J { + *maybe_unwrap(m.lock()) += 1; + } + } + + let (tx, rx) = channel(); + for _ in 0..K { + let tx2 = tx.clone(); + let m2 = m.clone(); + thread::spawn(move || { + inc(&m2); + tx2.send(()).unwrap(); + }); + let tx2 = tx.clone(); + let m2 = m.clone(); + thread::spawn(move || { + inc(&m2); + tx2.send(()).unwrap(); + }); + } + + drop(tx); + for _ in 0..2 * K { + rx.recv().unwrap(); + } + assert_eq!(*maybe_unwrap(m.lock()), J * K * 2); + } +); + +nonpoison_and_poison_unwrap_test!( + name: try_lock, + test_body: { + use locks::Mutex; + + let m = Mutex::new(()); + *m.try_lock().unwrap() = (); + } +); #[derive(Eq, PartialEq, Debug)] struct NonCopy(i32); @@ -26,58 +90,278 @@ fn test_needs_drop() { assert!(mem::needs_drop::()); } -#[derive(Clone, Eq, PartialEq, Debug)] -struct Cloneable(i32); +nonpoison_and_poison_unwrap_test!( + name: test_into_inner, + test_body: { + use locks::Mutex; -#[test] -fn smoke() { - let m = Mutex::new(()); - drop(m.lock().unwrap()); - drop(m.lock().unwrap()); -} + let m = Mutex::new(NonCopy(10)); + assert_eq!(maybe_unwrap(m.into_inner()), NonCopy(10)); + } +); -#[test] -fn lots_and_lots() { - const J: u32 = 1000; - const K: u32 = 3; +nonpoison_and_poison_unwrap_test!( + name: test_into_inner_drop, + test_body: { + use locks::Mutex; - let m = Arc::new(Mutex::new(0)); + struct Foo(Arc); + impl Drop for Foo { + fn drop(&mut self) { + self.0.fetch_add(1, Ordering::SeqCst); + } + } - fn inc(m: &Mutex) { - for _ in 0..J { - *m.lock().unwrap() += 1; + let num_drops = Arc::new(AtomicUsize::new(0)); + let m = Mutex::new(Foo(num_drops.clone())); + assert_eq!(num_drops.load(Ordering::SeqCst), 0); + { + let _inner = maybe_unwrap(m.into_inner()); + 
assert_eq!(num_drops.load(Ordering::SeqCst), 0); } + assert_eq!(num_drops.load(Ordering::SeqCst), 1); } +); - let (tx, rx) = channel(); - for _ in 0..K { - let tx2 = tx.clone(); - let m2 = m.clone(); - thread::spawn(move || { - inc(&m2); - tx2.send(()).unwrap(); - }); - let tx2 = tx.clone(); - let m2 = m.clone(); - thread::spawn(move || { - inc(&m2); - tx2.send(()).unwrap(); - }); +nonpoison_and_poison_unwrap_test!( + name: test_get_mut, + test_body: { + use locks::Mutex; + + let mut m = Mutex::new(NonCopy(10)); + *maybe_unwrap(m.get_mut()) = NonCopy(20); + assert_eq!(maybe_unwrap(m.into_inner()), NonCopy(20)); } +); - drop(tx); - for _ in 0..2 * K { - rx.recv().unwrap(); +nonpoison_and_poison_unwrap_test!( + name: test_get_cloned, + test_body: { + use locks::Mutex; + + #[derive(Clone, Eq, PartialEq, Debug)] + struct Cloneable(i32); + + let m = Mutex::new(Cloneable(10)); + + assert_eq!(maybe_unwrap(m.get_cloned()), Cloneable(10)); } - assert_eq!(*m.lock().unwrap(), J * K * 2); -} +); + +nonpoison_and_poison_unwrap_test!( + name: test_set, + test_body: { + use locks::Mutex; + + fn inner(mut init: impl FnMut() -> T, mut value: impl FnMut() -> T) + where + T: Debug + Eq, + { + let m = Mutex::new(init()); + + assert_eq!(*maybe_unwrap(m.lock()), init()); + maybe_unwrap(m.set(value())); + assert_eq!(*maybe_unwrap(m.lock()), value()); + } + + inner(|| NonCopy(10), || NonCopy(20)); + inner(|| NonCopyNeedsDrop(10), || NonCopyNeedsDrop(20)); + } +); + +// Ensure that old values that are replaced by `set` are correctly dropped. +nonpoison_and_poison_unwrap_test!( + name: test_set_drop, + test_body: { + use locks::Mutex; + + struct Foo(Arc); + impl Drop for Foo { + fn drop(&mut self) { + self.0.fetch_add(1, Ordering::SeqCst); + } + } + + let num_drops = Arc::new(AtomicUsize::new(0)); + let m = Mutex::new(Foo(num_drops.clone())); + assert_eq!(num_drops.load(Ordering::SeqCst), 0); + + let different = Foo(Arc::new(AtomicUsize::new(42))); + maybe_unwrap(m.set(different)); + assert_eq!(num_drops.load(Ordering::SeqCst), 1); + } +); + +nonpoison_and_poison_unwrap_test!( + name: test_replace, + test_body: { + use locks::Mutex; + + fn inner(mut init: impl FnMut() -> T, mut value: impl FnMut() -> T) + where + T: Debug + Eq, + { + let m = Mutex::new(init()); + + assert_eq!(*maybe_unwrap(m.lock()), init()); + assert_eq!(maybe_unwrap(m.replace(value())), init()); + assert_eq!(*maybe_unwrap(m.lock()), value()); + } + inner(|| NonCopy(10), || NonCopy(20)); + inner(|| NonCopyNeedsDrop(10), || NonCopyNeedsDrop(20)); + } +); + +// FIXME(nonpoison_condvar): Move this to the `condvar.rs` test file once `nonpoison::condvar` gets +// implemented. #[test] -fn try_lock() { - let m = Mutex::new(()); - *m.try_lock().unwrap() = (); +fn test_mutex_arc_condvar() { + struct Packet(Arc<(Mutex, Condvar)>); + + let packet = Packet(Arc::new((Mutex::new(false), Condvar::new()))); + let packet2 = Packet(packet.0.clone()); + + let (tx, rx) = channel(); + + let _t = thread::spawn(move || { + // Wait until our parent has taken the lock. + rx.recv().unwrap(); + let &(ref lock, ref cvar) = &*packet2.0; + + // Set the data to `true` and wake up our parent. + let mut guard = lock.lock().unwrap(); + *guard = true; + cvar.notify_one(); + }); + + let &(ref lock, ref cvar) = &*packet.0; + let mut guard = lock.lock().unwrap(); + // Wake up our child. + tx.send(()).unwrap(); + + // Wait until our child has set the data to `true`. 
+ assert!(!*guard); + while !*guard { + guard = cvar.wait(guard).unwrap(); + } } +nonpoison_and_poison_unwrap_test!( + name: test_mutex_arc_nested, + test_body: { + use locks::Mutex; + + // Tests nested mutexes and access + // to underlying data. + let arc = Arc::new(Mutex::new(1)); + let arc2 = Arc::new(Mutex::new(arc)); + let (tx, rx) = channel(); + let _t = thread::spawn(move || { + let lock = maybe_unwrap(arc2.lock()); + let lock2 = maybe_unwrap(lock.lock()); + assert_eq!(*lock2, 1); + tx.send(()).unwrap(); + }); + rx.recv().unwrap(); + } +); + +nonpoison_and_poison_unwrap_test!( + name: test_mutex_unsized, + test_body: { + use locks::Mutex; + + let mutex: &Mutex<[i32]> = &Mutex::new([1, 2, 3]); + { + let b = &mut *maybe_unwrap(mutex.lock()); + b[0] = 4; + b[2] = 5; + } + let comp: &[i32] = &[4, 2, 5]; + assert_eq!(&*maybe_unwrap(mutex.lock()), comp); + } +); + +nonpoison_and_poison_unwrap_test!( + name: test_mapping_mapped_guard, + test_body: { + use locks::{Mutex, MutexGuard, MappedMutexGuard}; + + let arr = [0; 4]; + let lock = Mutex::new(arr); + let guard = maybe_unwrap(lock.lock()); + let guard = MutexGuard::map(guard, |arr| &mut arr[..2]); + let mut guard = MappedMutexGuard::map(guard, |slice| &mut slice[1..]); + assert_eq!(guard.len(), 1); + guard[0] = 42; + drop(guard); + assert_eq!(*maybe_unwrap(lock.lock()), [0, 42, 0, 0]); + } +); + +#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] +nonpoison_and_poison_unwrap_test!( + name: test_panics, + test_body: { + use locks::Mutex; + + let mutex = Mutex::new(42); + + let catch_unwind_result1 = panic::catch_unwind(AssertUnwindSafe(|| { + let _guard1 = maybe_unwrap(mutex.lock()); + + panic!("test panic with mutex once"); + })); + assert!(catch_unwind_result1.is_err()); + + let catch_unwind_result2 = panic::catch_unwind(AssertUnwindSafe(|| { + let _guard2 = maybe_unwrap(mutex.lock()); + + panic!("test panic with mutex twice"); + })); + assert!(catch_unwind_result2.is_err()); + + let catch_unwind_result3 = panic::catch_unwind(AssertUnwindSafe(|| { + let _guard3 = maybe_unwrap(mutex.lock()); + + panic!("test panic with mutex thrice"); + })); + assert!(catch_unwind_result3.is_err()); + } +); + +#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] +nonpoison_and_poison_unwrap_test!( + name: test_mutex_arc_access_in_unwind, + test_body: { + use locks::Mutex; + + let arc = Arc::new(Mutex::new(1)); + let arc2 = arc.clone(); + let _ = thread::spawn(move || -> () { + struct Unwinder { + i: Arc>, + } + impl Drop for Unwinder { + fn drop(&mut self) { + *maybe_unwrap(self.i.lock()) += 1; + } + } + let _u = Unwinder { i: arc2 }; + panic!(); + }) + .join(); + let lock = maybe_unwrap(arc.lock()); + assert_eq!(*lock, 2); + } +); + +//////////////////////////////////////////////////////////////////////////////////////////////////// +// Poison Tests +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Creates a mutex that is immediately poisoned. 
fn new_poisoned_mutex(value: T) -> Mutex { let mutex = Mutex::new(value); @@ -93,30 +377,6 @@ fn new_poisoned_mutex(value: T) -> Mutex { mutex } -#[test] -fn test_into_inner() { - let m = Mutex::new(NonCopy(10)); - assert_eq!(m.into_inner().unwrap(), NonCopy(10)); -} - -#[test] -fn test_into_inner_drop() { - struct Foo(Arc); - impl Drop for Foo { - fn drop(&mut self) { - self.0.fetch_add(1, Ordering::SeqCst); - } - } - let num_drops = Arc::new(AtomicUsize::new(0)); - let m = Mutex::new(Foo(num_drops.clone())); - assert_eq!(num_drops.load(Ordering::SeqCst), 0); - { - let _inner = m.into_inner().unwrap(); - assert_eq!(num_drops.load(Ordering::SeqCst), 0); - } - assert_eq!(num_drops.load(Ordering::SeqCst), 1); -} - #[test] #[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] fn test_into_inner_poison() { @@ -128,16 +388,12 @@ fn test_into_inner_poison() { } } -#[test] -fn test_get_cloned() { - let m = Mutex::new(Cloneable(10)); - - assert_eq!(m.get_cloned().unwrap(), Cloneable(10)); -} - #[test] #[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] fn test_get_cloned_poison() { + #[derive(Clone, Eq, PartialEq, Debug)] + struct Cloneable(i32); + let m = new_poisoned_mutex(Cloneable(10)); match m.get_cloned() { @@ -146,13 +402,6 @@ fn test_get_cloned_poison() { } } -#[test] -fn test_get_mut() { - let mut m = Mutex::new(NonCopy(10)); - *m.get_mut().unwrap() = NonCopy(20); - assert_eq!(m.into_inner().unwrap(), NonCopy(20)); -} - #[test] #[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] fn test_get_mut_poison() { @@ -164,23 +413,6 @@ fn test_get_mut_poison() { } } -#[test] -fn test_set() { - fn inner(mut init: impl FnMut() -> T, mut value: impl FnMut() -> T) - where - T: Debug + Eq, - { - let m = Mutex::new(init()); - - assert_eq!(*m.lock().unwrap(), init()); - m.set(value()).unwrap(); - assert_eq!(*m.lock().unwrap(), value()); - } - - inner(|| NonCopy(10), || NonCopy(20)); - inner(|| NonCopyNeedsDrop(10), || NonCopyNeedsDrop(20)); -} - #[test] #[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] fn test_set_poison() { @@ -203,23 +435,6 @@ fn test_set_poison() { inner(|| NonCopyNeedsDrop(10), || NonCopyNeedsDrop(20)); } -#[test] -fn test_replace() { - fn inner(mut init: impl FnMut() -> T, mut value: impl FnMut() -> T) - where - T: Debug + Eq, - { - let m = Mutex::new(init()); - - assert_eq!(*m.lock().unwrap(), init()); - assert_eq!(m.replace(value()).unwrap(), init()); - assert_eq!(*m.lock().unwrap(), value()); - } - - inner(|| NonCopy(10), || NonCopy(20)); - inner(|| NonCopyNeedsDrop(10), || NonCopyNeedsDrop(20)); -} - #[test] #[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] fn test_replace_poison() { @@ -242,32 +457,11 @@ fn test_replace_poison() { inner(|| NonCopyNeedsDrop(10), || NonCopyNeedsDrop(20)); } -#[test] -fn test_mutex_arc_condvar() { - let packet = Packet(Arc::new((Mutex::new(false), Condvar::new()))); - let packet2 = Packet(packet.0.clone()); - let (tx, rx) = channel(); - let _t = thread::spawn(move || { - // wait until parent gets in - rx.recv().unwrap(); - let &(ref lock, ref cvar) = &*packet2.0; - let mut lock = lock.lock().unwrap(); - *lock = true; - cvar.notify_one(); - }); - - let &(ref lock, ref cvar) = &*packet.0; - let mut lock = lock.lock().unwrap(); - tx.send(()).unwrap(); - assert!(!*lock); - while !*lock { - lock = cvar.wait(lock).unwrap(); - } -} - #[test] #[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] 
fn test_arc_condvar_poison() { + struct Packet(Arc<(Mutex, Condvar)>); + let packet = Packet(Arc::new((Mutex::new(1), Condvar::new()))); let packet2 = Packet(packet.0.clone()); let (tx, rx) = channel(); @@ -326,69 +520,6 @@ fn test_mutex_arc_poison_mapped() { assert!(arc.is_poisoned()); } -#[test] -fn test_mutex_arc_nested() { - // Tests nested mutexes and access - // to underlying data. - let arc = Arc::new(Mutex::new(1)); - let arc2 = Arc::new(Mutex::new(arc)); - let (tx, rx) = channel(); - let _t = thread::spawn(move || { - let lock = arc2.lock().unwrap(); - let lock2 = lock.lock().unwrap(); - assert_eq!(*lock2, 1); - tx.send(()).unwrap(); - }); - rx.recv().unwrap(); -} - -#[test] -#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] -fn test_mutex_arc_access_in_unwind() { - let arc = Arc::new(Mutex::new(1)); - let arc2 = arc.clone(); - let _ = thread::spawn(move || -> () { - struct Unwinder { - i: Arc>, - } - impl Drop for Unwinder { - fn drop(&mut self) { - *self.i.lock().unwrap() += 1; - } - } - let _u = Unwinder { i: arc2 }; - panic!(); - }) - .join(); - let lock = arc.lock().unwrap(); - assert_eq!(*lock, 2); -} - -#[test] -fn test_mutex_unsized() { - let mutex: &Mutex<[i32]> = &Mutex::new([1, 2, 3]); - { - let b = &mut *mutex.lock().unwrap(); - b[0] = 4; - b[2] = 5; - } - let comp: &[i32] = &[4, 2, 5]; - assert_eq!(&*mutex.lock().unwrap(), comp); -} - -#[test] -fn test_mapping_mapped_guard() { - let arr = [0; 4]; - let mut lock = Mutex::new(arr); - let guard = lock.lock().unwrap(); - let guard = MutexGuard::map(guard, |arr| &mut arr[..2]); - let mut guard = MappedMutexGuard::map(guard, |slice| &mut slice[1..]); - assert_eq!(guard.len(), 1); - guard[0] = 42; - drop(guard); - assert_eq!(*lock.get_mut().unwrap(), [0, 42, 0, 0]); -} - #[test] #[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] fn panic_while_mapping_unlocked_poison() { diff --git a/library/stdarch/crates/std_detect/Cargo.toml b/library/std_detect/Cargo.toml similarity index 76% rename from library/stdarch/crates/std_detect/Cargo.toml rename to library/std_detect/Cargo.toml index f990e72412528..8d91454726bce 100644 --- a/library/stdarch/crates/std_detect/Cargo.toml +++ b/library/std_detect/Cargo.toml @@ -22,20 +22,14 @@ maintenance = { status = "experimental" } [dependencies] cfg-if = "1.0.0" - -# When built as part of libstd -core = { version = "1.0.0", optional = true, package = "rustc-std-workspace-core" } -alloc = { version = "1.0.0", optional = true, package = "rustc-std-workspace-alloc" } +core = { path = "../core" } +alloc = { path = "../alloc" } [target.'cfg(not(windows))'.dependencies] libc = { version = "0.2.0", optional = true, default-features = false } [features] -default = [ "std_detect_dlsym_getauxval", "std_detect_file_io" ] +default = [] std_detect_file_io = [ "libc" ] std_detect_dlsym_getauxval = [ "libc" ] std_detect_env_override = [ "libc" ] -rustc-dep-of-std = [ - "core", - "alloc", -] diff --git a/library/stdarch/crates/std_detect/README.md b/library/std_detect/README.md similarity index 99% rename from library/stdarch/crates/std_detect/README.md rename to library/std_detect/README.md index 091f5542e0e87..edc90d319a1da 100644 --- a/library/stdarch/crates/std_detect/README.md +++ b/library/std_detect/README.md @@ -55,7 +55,7 @@ crate from working on applications in which `std` is not available. application. 
* Linux/Android: - * `arm{32, 64}`, `mips{32,64}{,el}`, `powerpc{32,64}{,le}`, `loongarch64`, `s390x`: + * `arm{32, 64}`, `mips{32,64}{,el}`, `powerpc{32,64}{,le}`, `loongarch{32,64}`, `s390x`: `std_detect` supports these on Linux by querying ELF auxiliary vectors (using `getauxval` when available), and if that fails, by querying `/proc/self/auxv`. * `arm64`: partial support for doing run-time feature detection by directly diff --git a/library/stdarch/crates/std_detect/src/detect/arch/aarch64.rs b/library/std_detect/src/detect/arch/aarch64.rs similarity index 100% rename from library/stdarch/crates/std_detect/src/detect/arch/aarch64.rs rename to library/std_detect/src/detect/arch/aarch64.rs diff --git a/library/stdarch/crates/std_detect/src/detect/arch/arm.rs b/library/std_detect/src/detect/arch/arm.rs similarity index 100% rename from library/stdarch/crates/std_detect/src/detect/arch/arm.rs rename to library/std_detect/src/detect/arch/arm.rs diff --git a/library/stdarch/crates/std_detect/src/detect/arch/loongarch.rs b/library/std_detect/src/detect/arch/loongarch.rs similarity index 96% rename from library/stdarch/crates/std_detect/src/detect/arch/loongarch.rs rename to library/std_detect/src/detect/arch/loongarch.rs index e9d68f6a9bf7f..68fc600fa8e79 100644 --- a/library/stdarch/crates/std_detect/src/detect/arch/loongarch.rs +++ b/library/std_detect/src/detect/arch/loongarch.rs @@ -2,7 +2,7 @@ features! { @TARGET: loongarch; - @CFG: target_arch = "loongarch64"; + @CFG: any(target_arch = "loongarch32", target_arch = "loongarch64"); @MACRO_NAME: is_loongarch_feature_detected; @MACRO_ATTRS: /// Checks if `loongarch` feature is enabled. diff --git a/library/stdarch/crates/std_detect/src/detect/arch/mips.rs b/library/std_detect/src/detect/arch/mips.rs similarity index 100% rename from library/stdarch/crates/std_detect/src/detect/arch/mips.rs rename to library/std_detect/src/detect/arch/mips.rs diff --git a/library/stdarch/crates/std_detect/src/detect/arch/mips64.rs b/library/std_detect/src/detect/arch/mips64.rs similarity index 100% rename from library/stdarch/crates/std_detect/src/detect/arch/mips64.rs rename to library/std_detect/src/detect/arch/mips64.rs diff --git a/library/stdarch/crates/std_detect/src/detect/arch/mod.rs b/library/std_detect/src/detect/arch/mod.rs similarity index 96% rename from library/stdarch/crates/std_detect/src/detect/arch/mod.rs rename to library/std_detect/src/detect/arch/mod.rs index d5a13acc02826..b0be554ed898f 100644 --- a/library/stdarch/crates/std_detect/src/detect/arch/mod.rs +++ b/library/std_detect/src/detect/arch/mod.rs @@ -49,7 +49,7 @@ cfg_if! 
{ } else if #[cfg(target_arch = "mips64")] { #[unstable(feature = "stdarch_mips_feature_detection", issue = "111188")] pub use mips64::*; - } else if #[cfg(target_arch = "loongarch64")] { + } else if #[cfg(any(target_arch = "loongarch32", target_arch = "loongarch64"))] { #[stable(feature = "stdarch_loongarch_feature", since = "1.89.0")] pub use loongarch::*; } else if #[cfg(target_arch = "s390x")] { diff --git a/library/stdarch/crates/std_detect/src/detect/arch/powerpc.rs b/library/std_detect/src/detect/arch/powerpc.rs similarity index 100% rename from library/stdarch/crates/std_detect/src/detect/arch/powerpc.rs rename to library/std_detect/src/detect/arch/powerpc.rs diff --git a/library/stdarch/crates/std_detect/src/detect/arch/powerpc64.rs b/library/std_detect/src/detect/arch/powerpc64.rs similarity index 100% rename from library/stdarch/crates/std_detect/src/detect/arch/powerpc64.rs rename to library/std_detect/src/detect/arch/powerpc64.rs diff --git a/library/stdarch/crates/std_detect/src/detect/arch/riscv.rs b/library/std_detect/src/detect/arch/riscv.rs similarity index 100% rename from library/stdarch/crates/std_detect/src/detect/arch/riscv.rs rename to library/std_detect/src/detect/arch/riscv.rs diff --git a/library/stdarch/crates/std_detect/src/detect/arch/s390x.rs b/library/std_detect/src/detect/arch/s390x.rs similarity index 100% rename from library/stdarch/crates/std_detect/src/detect/arch/s390x.rs rename to library/std_detect/src/detect/arch/s390x.rs diff --git a/library/stdarch/crates/std_detect/src/detect/arch/x86.rs b/library/std_detect/src/detect/arch/x86.rs similarity index 100% rename from library/stdarch/crates/std_detect/src/detect/arch/x86.rs rename to library/std_detect/src/detect/arch/x86.rs diff --git a/library/stdarch/crates/std_detect/src/detect/bit.rs b/library/std_detect/src/detect/bit.rs similarity index 100% rename from library/stdarch/crates/std_detect/src/detect/bit.rs rename to library/std_detect/src/detect/bit.rs diff --git a/library/stdarch/crates/std_detect/src/detect/cache.rs b/library/std_detect/src/detect/cache.rs similarity index 88% rename from library/stdarch/crates/std_detect/src/detect/cache.rs rename to library/std_detect/src/detect/cache.rs index 83bcedea612e6..1a42e09146319 100644 --- a/library/stdarch/crates/std_detect/src/detect/cache.rs +++ b/library/std_detect/src/detect/cache.rs @@ -3,9 +3,7 @@ #![allow(dead_code)] // not used on all platforms -use core::sync::atomic::Ordering; - -use core::sync::atomic::AtomicUsize; +use core::sync::atomic::{AtomicUsize, Ordering}; /// Sets the `bit` of `x`. #[inline] @@ -40,20 +38,14 @@ impl Initializer { /// Tests the `bit` of the cache. #[inline] pub(crate) fn test(self, bit: u32) -> bool { - debug_assert!( - bit < CACHE_CAPACITY, - "too many features, time to increase the cache size!" - ); + debug_assert!(bit < CACHE_CAPACITY, "too many features, time to increase the cache size!"); test_bit(self.0, bit) } /// Sets the `bit` of the cache. #[inline] pub(crate) fn set(&mut self, bit: u32) { - debug_assert!( - bit < CACHE_CAPACITY, - "too many features, time to increase the cache size!" - ); + debug_assert!(bit < CACHE_CAPACITY, "too many features, time to increase the cache size!"); let v = self.0; self.0 = set_bit(v, bit); } @@ -61,10 +53,7 @@ impl Initializer { /// Unsets the `bit` of the cache. #[inline] pub(crate) fn unset(&mut self, bit: u32) { - debug_assert!( - bit < CACHE_CAPACITY, - "too many features, time to increase the cache size!" 
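// [Editorial aside — illustrative only, not part of the patch.] Initializer::test/set/unset
// above delegate to small bit helpers defined earlier in cache.rs; they are assumed to look
// roughly like this (u128 because the initializer packs all feature bits before they are
// split across the three usize cache slots):
//
//     const fn set_bit(x: u128, bit: u32) -> u128 { x | (1 << bit) }
//     const fn unset_bit(x: u128, bit: u32) -> u128 { x & !(1 << bit) }
//     const fn test_bit(x: u128, bit: u32) -> bool { x & (1 << bit) != 0 }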
- ); + debug_assert!(bit < CACHE_CAPACITY, "too many features, time to increase the cache size!"); let v = self.0; self.0 = unset_bit(v, bit); } @@ -73,11 +62,7 @@ impl Initializer { /// This global variable is a cache of the features supported by the CPU. // Note: the third slot is only used in x86 // Another Slot can be added if needed without any change to `Initializer` -static CACHE: [Cache; 3] = [ - Cache::uninitialized(), - Cache::uninitialized(), - Cache::uninitialized(), -]; +static CACHE: [Cache; 3] = [Cache::uninitialized(), Cache::uninitialized(), Cache::uninitialized()]; /// Feature cache with capacity for `size_of::() * 8 - 1` features. /// @@ -104,19 +89,14 @@ impl Cache { #[inline] pub(crate) fn test(&self, bit: u32) -> Option { let cached = self.0.load(Ordering::Relaxed); - if cached == 0 { - None - } else { - Some(test_bit(cached as u128, bit)) - } + if cached == 0 { None } else { Some(test_bit(cached as u128, bit)) } } /// Initializes the cache. #[inline] fn initialize(&self, value: usize) -> usize { debug_assert_eq!((value & !Cache::MASK), 0); - self.0 - .store(value | Cache::INITIALIZED_BIT, Ordering::Relaxed); + self.0.store(value | Cache::INITIALIZED_BIT, Ordering::Relaxed); value } } @@ -217,7 +197,5 @@ pub(crate) fn test(bit: u32) -> bool { } else { (bit - 2 * Cache::CAPACITY, 2) }; - CACHE[idx] - .test(relative_bit) - .unwrap_or_else(|| detect_and_initialize().test(bit)) + CACHE[idx].test(relative_bit).unwrap_or_else(|| detect_and_initialize().test(bit)) } diff --git a/library/stdarch/crates/std_detect/src/detect/macros.rs b/library/std_detect/src/detect/macros.rs similarity index 99% rename from library/stdarch/crates/std_detect/src/detect/macros.rs rename to library/std_detect/src/detect/macros.rs index a2994fb7daa7a..17140e15653d2 100644 --- a/library/stdarch/crates/std_detect/src/detect/macros.rs +++ b/library/std_detect/src/detect/macros.rs @@ -131,14 +131,13 @@ macro_rules! features { }; } - #[test] #[deny(unexpected_cfgs)] #[deny(unfulfilled_lint_expectations)] - fn unexpected_cfgs() { + const _: () = { $( check_cfg_feature!($feature, $feature_lit $(, without cfg check: $feature_cfg_check)? $(: $($target_feature_lit),*)?); )* - } + }; /// Each variant denotes a position in a bitset for a particular feature. /// diff --git a/library/stdarch/crates/std_detect/src/detect/mod.rs b/library/std_detect/src/detect/mod.rs similarity index 99% rename from library/stdarch/crates/std_detect/src/detect/mod.rs rename to library/std_detect/src/detect/mod.rs index 8fd3d95793288..f936a5a1345d9 100644 --- a/library/stdarch/crates/std_detect/src/detect/mod.rs +++ b/library/std_detect/src/detect/mod.rs @@ -29,7 +29,6 @@ mod arch; #[doc(hidden)] #[unstable(feature = "stdarch_internal", issue = "none")] pub use self::arch::__is_feature_detected; - pub(crate) use self::arch::Feature; mod bit; @@ -103,6 +102,7 @@ pub fn features() -> impl Iterator { target_arch = "powerpc64", target_arch = "mips", target_arch = "mips64", + target_arch = "loongarch32", target_arch = "loongarch64", target_arch = "s390x", ))] { diff --git a/library/stdarch/crates/std_detect/src/detect/os/aarch64.rs b/library/std_detect/src/detect/os/aarch64.rs similarity index 97% rename from library/stdarch/crates/std_detect/src/detect/os/aarch64.rs rename to library/std_detect/src/detect/os/aarch64.rs index 1ff2a17e6e1e5..c2c754ccf8db2 100644 --- a/library/stdarch/crates/std_detect/src/detect/os/aarch64.rs +++ b/library/std_detect/src/detect/os/aarch64.rs @@ -17,9 +17,10 @@ //! 
- [Linux documentation](https://www.kernel.org/doc/Documentation/arm64/cpu-feature-registers.txt) //! - [ARM documentation](https://developer.arm.com/documentation/ddi0601/2022-12/AArch64-Registers?lang=en) -use crate::detect::{Feature, cache}; use core::arch::asm; +use crate::detect::{Feature, cache}; + /// Try to read the features from the system registers. /// /// This will cause SIGILL if the current OS is not trapping the mrs instruction. @@ -104,10 +105,7 @@ pub(crate) fn parse_system_registers( let sha2 = bits_shift(aa64isar0, 15, 12) >= 1; enable_feature(Feature::sha2, asimd && sha1 && sha2); enable_feature(Feature::rdm, asimd && bits_shift(aa64isar0, 31, 28) >= 1); - enable_feature( - Feature::dotprod, - asimd && bits_shift(aa64isar0, 47, 44) >= 1, - ); + enable_feature(Feature::dotprod, asimd && bits_shift(aa64isar0, 47, 44) >= 1); enable_feature(Feature::sve, asimd && bits_shift(aa64pfr0, 35, 32) >= 1); } diff --git a/library/stdarch/crates/std_detect/src/detect/os/darwin/aarch64.rs b/library/std_detect/src/detect/os/darwin/aarch64.rs similarity index 97% rename from library/stdarch/crates/std_detect/src/detect/os/darwin/aarch64.rs rename to library/std_detect/src/detect/os/darwin/aarch64.rs index 44d921689e5a4..f5409361d93b9 100644 --- a/library/stdarch/crates/std_detect/src/detect/os/darwin/aarch64.rs +++ b/library/std_detect/src/detect/os/darwin/aarch64.rs @@ -2,9 +2,10 @@ //! //! -use crate::detect::{Feature, cache}; use core::ffi::CStr; +use crate::detect::{Feature, cache}; + #[inline] fn _sysctlbyname(name: &CStr) -> bool { use libc; @@ -14,13 +15,7 @@ fn _sysctlbyname(name: &CStr) -> bool { let enabled_ptr = &mut enabled as *mut i32 as *mut libc::c_void; let ret = unsafe { - libc::sysctlbyname( - name.as_ptr(), - enabled_ptr, - &mut enabled_len, - core::ptr::null_mut(), - 0, - ) + libc::sysctlbyname(name.as_ptr(), enabled_ptr, &mut enabled_len, core::ptr::null_mut(), 0) }; match ret { diff --git a/library/stdarch/crates/std_detect/src/detect/os/freebsd/aarch64.rs b/library/std_detect/src/detect/os/freebsd/aarch64.rs similarity index 100% rename from library/stdarch/crates/std_detect/src/detect/os/freebsd/aarch64.rs rename to library/std_detect/src/detect/os/freebsd/aarch64.rs diff --git a/library/stdarch/crates/std_detect/src/detect/os/freebsd/arm.rs b/library/std_detect/src/detect/os/freebsd/arm.rs similarity index 100% rename from library/stdarch/crates/std_detect/src/detect/os/freebsd/arm.rs rename to library/std_detect/src/detect/os/freebsd/arm.rs diff --git a/library/stdarch/crates/std_detect/src/detect/os/freebsd/auxvec.rs b/library/std_detect/src/detect/os/freebsd/auxvec.rs similarity index 94% rename from library/stdarch/crates/std_detect/src/detect/os/freebsd/auxvec.rs rename to library/std_detect/src/detect/os/freebsd/auxvec.rs index 4e72bf22d76cd..2a7b87c05d1c4 100644 --- a/library/stdarch/crates/std_detect/src/detect/os/freebsd/auxvec.rs +++ b/library/std_detect/src/detect/os/freebsd/auxvec.rs @@ -54,11 +54,8 @@ fn archauxv(key: libc::c_int) -> usize { // https://github.com/freebsd/freebsd-src/blob/release/11.4.0/sys/sys/auxv.h // FreeBSD 11 support in std has been removed in Rust 1.75 (https://github.com/rust-lang/rust/pull/114521), // so we can safely use this function. 
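// [Editorial aside — illustrative only, not part of the patch.] elf_aux_info is FreeBSD's
// getauxval-style accessor: it copies the value for a given AT_* key into a caller-provided
// buffer and returns 0 on success. A rough sketch of the pattern used by archauxv above
// (the AT_HWCAP constant name is an assumption):
//
//     let mut out: libc::c_ulong = 0;
//     let ok = unsafe {
//         libc::elf_aux_info(
//             libc::AT_HWCAP,
//             &mut out as *mut libc::c_ulong as *mut libc::c_void,
//             core::mem::size_of::<libc::c_ulong>() as libc::c_int,
//         ) == 0
//     };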
- let res = libc::elf_aux_info( - key, - &mut out as *mut libc::c_ulong as *mut libc::c_void, - OUT_LEN, - ); + let res = + libc::elf_aux_info(key, &mut out as *mut libc::c_ulong as *mut libc::c_void, OUT_LEN); // If elf_aux_info fails, `out` will be left at zero (which is the proper default value). debug_assert!(res == 0 || out == 0); } diff --git a/library/stdarch/crates/std_detect/src/detect/os/freebsd/mod.rs b/library/std_detect/src/detect/os/freebsd/mod.rs similarity index 100% rename from library/stdarch/crates/std_detect/src/detect/os/freebsd/mod.rs rename to library/std_detect/src/detect/os/freebsd/mod.rs diff --git a/library/stdarch/crates/std_detect/src/detect/os/freebsd/powerpc.rs b/library/std_detect/src/detect/os/freebsd/powerpc.rs similarity index 100% rename from library/stdarch/crates/std_detect/src/detect/os/freebsd/powerpc.rs rename to library/std_detect/src/detect/os/freebsd/powerpc.rs diff --git a/library/stdarch/crates/std_detect/src/detect/os/linux/aarch64.rs b/library/std_detect/src/detect/os/linux/aarch64.rs similarity index 84% rename from library/stdarch/crates/std_detect/src/detect/os/linux/aarch64.rs rename to library/std_detect/src/detect/os/linux/aarch64.rs index 22a9cefff7b83..87a9d6ebb8875 100644 --- a/library/stdarch/crates/std_detect/src/detect/os/linux/aarch64.rs +++ b/library/std_detect/src/detect/os/linux/aarch64.rs @@ -343,14 +343,8 @@ impl AtHwcap { enable_feature(Feature::sve2, sve2); enable_feature(Feature::sve2p1, self.sve2p1 && sve2); // SVE2 extensions require SVE2 and crypto features - enable_feature( - Feature::sve2_aes, - self.sveaes && self.svepmull && sve2 && self.aes, - ); - enable_feature( - Feature::sve2_sm4, - self.svesm4 && sve2 && self.sm3 && self.sm4, - ); + enable_feature(Feature::sve2_aes, self.sveaes && self.svepmull && sve2 && self.aes); + enable_feature(Feature::sve2_sm4, self.svesm4 && sve2 && self.sm3 && self.sm4); enable_feature( Feature::sve2_sha3, self.svesha3 && sve2 && self.sha512 && self.sha3 && self.sha1 && self.sha2, @@ -401,84 +395,4 @@ impl AtHwcap { #[cfg(target_endian = "little")] #[cfg(test)] -mod tests { - use super::*; - - #[cfg(feature = "std_detect_file_io")] - mod auxv_from_file { - use super::auxvec::auxv_from_file; - use super::*; - // The baseline hwcaps used in the (artificial) auxv test files. 
- fn baseline_hwcaps() -> AtHwcap { - AtHwcap { - fp: true, - asimd: true, - aes: true, - pmull: true, - sha1: true, - sha2: true, - crc32: true, - atomics: true, - fphp: true, - asimdhp: true, - asimdrdm: true, - lrcpc: true, - dcpop: true, - asimddp: true, - ssbs: true, - ..AtHwcap::default() - } - } - - #[test] - fn linux_empty_hwcap2_aarch64() { - let file = concat!( - env!("CARGO_MANIFEST_DIR"), - "/src/detect/test_data/linux-empty-hwcap2-aarch64.auxv" - ); - println!("file: {file}"); - let v = auxv_from_file(file).unwrap(); - println!("HWCAP : 0x{:0x}", v.hwcap); - println!("HWCAP2: 0x{:0x}", v.hwcap2); - assert_eq!(AtHwcap::from(v), baseline_hwcaps()); - } - #[test] - fn linux_no_hwcap2_aarch64() { - let file = concat!( - env!("CARGO_MANIFEST_DIR"), - "/src/detect/test_data/linux-no-hwcap2-aarch64.auxv" - ); - println!("file: {file}"); - let v = auxv_from_file(file).unwrap(); - println!("HWCAP : 0x{:0x}", v.hwcap); - println!("HWCAP2: 0x{:0x}", v.hwcap2); - assert_eq!(AtHwcap::from(v), baseline_hwcaps()); - } - #[test] - fn linux_hwcap2_aarch64() { - let file = concat!( - env!("CARGO_MANIFEST_DIR"), - "/src/detect/test_data/linux-hwcap2-aarch64.auxv" - ); - println!("file: {file}"); - let v = auxv_from_file(file).unwrap(); - println!("HWCAP : 0x{:0x}", v.hwcap); - println!("HWCAP2: 0x{:0x}", v.hwcap2); - assert_eq!( - AtHwcap::from(v), - AtHwcap { - // Some other HWCAP bits. - paca: true, - pacg: true, - // HWCAP2-only bits. - dcpodp: true, - frint: true, - rng: true, - bti: true, - mte: true, - ..baseline_hwcaps() - } - ); - } - } -} +mod tests; diff --git a/library/std_detect/src/detect/os/linux/aarch64/tests.rs b/library/std_detect/src/detect/os/linux/aarch64/tests.rs new file mode 100644 index 0000000000000..a3562f2fd9362 --- /dev/null +++ b/library/std_detect/src/detect/os/linux/aarch64/tests.rs @@ -0,0 +1,77 @@ +use super::*; + +#[cfg(feature = "std_detect_file_io")] +mod auxv_from_file { + use super::auxvec::auxv_from_file; + use super::*; + // The baseline hwcaps used in the (artificial) auxv test files. + fn baseline_hwcaps() -> AtHwcap { + AtHwcap { + fp: true, + asimd: true, + aes: true, + pmull: true, + sha1: true, + sha2: true, + crc32: true, + atomics: true, + fphp: true, + asimdhp: true, + asimdrdm: true, + lrcpc: true, + dcpop: true, + asimddp: true, + ssbs: true, + ..AtHwcap::default() + } + } + + #[test] + fn linux_empty_hwcap2_aarch64() { + let file = concat!( + env!("CARGO_MANIFEST_DIR"), + "/src/detect/test_data/linux-empty-hwcap2-aarch64.auxv" + ); + println!("file: {file}"); + let v = auxv_from_file(file).unwrap(); + println!("HWCAP : 0x{:0x}", v.hwcap); + println!("HWCAP2: 0x{:0x}", v.hwcap2); + assert_eq!(AtHwcap::from(v), baseline_hwcaps()); + } + #[test] + fn linux_no_hwcap2_aarch64() { + let file = concat!( + env!("CARGO_MANIFEST_DIR"), + "/src/detect/test_data/linux-no-hwcap2-aarch64.auxv" + ); + println!("file: {file}"); + let v = auxv_from_file(file).unwrap(); + println!("HWCAP : 0x{:0x}", v.hwcap); + println!("HWCAP2: 0x{:0x}", v.hwcap2); + assert_eq!(AtHwcap::from(v), baseline_hwcaps()); + } + #[test] + fn linux_hwcap2_aarch64() { + let file = + concat!(env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/linux-hwcap2-aarch64.auxv"); + println!("file: {file}"); + let v = auxv_from_file(file).unwrap(); + println!("HWCAP : 0x{:0x}", v.hwcap); + println!("HWCAP2: 0x{:0x}", v.hwcap2); + assert_eq!( + AtHwcap::from(v), + AtHwcap { + // Some other HWCAP bits. + paca: true, + pacg: true, + // HWCAP2-only bits. 
+ dcpodp: true, + frint: true, + rng: true, + bti: true, + mte: true, + ..baseline_hwcaps() + } + ); + } +} diff --git a/library/stdarch/crates/std_detect/src/detect/os/linux/arm.rs b/library/std_detect/src/detect/os/linux/arm.rs similarity index 100% rename from library/stdarch/crates/std_detect/src/detect/os/linux/arm.rs rename to library/std_detect/src/detect/os/linux/arm.rs diff --git a/library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs b/library/std_detect/src/detect/os/linux/auxvec.rs similarity index 66% rename from library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs rename to library/std_detect/src/detect/os/linux/auxvec.rs index c30379ff06554..443caaaa1863e 100644 --- a/library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs +++ b/library/std_detect/src/detect/os/linux/auxvec.rs @@ -80,6 +80,7 @@ pub(crate) fn auxv() -> Result { target_arch = "riscv64", target_arch = "mips", target_arch = "mips64", + target_arch = "loongarch32", target_arch = "loongarch64", ))] { @@ -118,7 +119,7 @@ pub(crate) fn auxv() -> Result { { // If calling getauxval fails, try to read the auxiliary vector from // its file: - auxv_from_file("/proc/self/auxv") + auxv_from_file("/proc/self/auxv").map_err(|_| ()) } #[cfg(not(feature = "std_detect_file_io"))] { @@ -156,17 +157,22 @@ fn getauxval(key: usize) -> Result { /// Tries to read the auxiliary vector from the `file`. If this fails, this /// function returns `Err`. #[cfg(feature = "std_detect_file_io")] -pub(super) fn auxv_from_file(file: &str) -> Result { +pub(super) fn auxv_from_file(file: &str) -> Result { let file = super::read_file(file)?; + auxv_from_file_bytes(&file) +} +/// Read auxiliary vector from a slice of bytes. +#[cfg(feature = "std_detect_file_io")] +pub(super) fn auxv_from_file_bytes(bytes: &[u8]) -> Result { // See . // // The auxiliary vector contains at most 34 (key,value) fields: from // `AT_MINSIGSTKSZ` to `AT_NULL`, but its number may increase. - let len = file.len(); + let len = bytes.len(); let mut buf = alloc::vec![0_usize; 1 + len / core::mem::size_of::()]; unsafe { - core::ptr::copy_nonoverlapping(file.as_ptr(), buf.as_mut_ptr() as *mut u8, len); + core::ptr::copy_nonoverlapping(bytes.as_ptr(), buf.as_mut_ptr() as *mut u8, len); } auxv_from_buf(&buf) @@ -175,13 +181,14 @@ pub(super) fn auxv_from_file(file: &str) -> Result { /// Tries to interpret the `buffer` as an auxiliary vector. If that fails, this /// function returns `Err`. #[cfg(feature = "std_detect_file_io")] -fn auxv_from_buf(buf: &[usize]) -> Result { +fn auxv_from_buf(buf: &[usize]) -> Result { // Targets with only AT_HWCAP: #[cfg(any( target_arch = "riscv32", target_arch = "riscv64", target_arch = "mips", target_arch = "mips64", + target_arch = "loongarch32", target_arch = "loongarch64", ))] { @@ -220,120 +227,8 @@ fn auxv_from_buf(buf: &[usize]) -> Result { } // Suppress unused variable let _ = buf; - Err(()) + Err(alloc::string::String::from("hwcap not found")) } #[cfg(test)] -mod tests { - use super::*; - - // FIXME: on mips/mips64 getauxval returns 0, and /proc/self/auxv - // does not always contain the AT_HWCAP key under qemu. 
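// [Editorial aside — illustrative only, not part of the patch.] The buffer handed to
// auxv_from_buf above is the raw auxiliary vector: a flat sequence of (key, value) usize
// pairs terminated by an AT_NULL (key == 0) entry. A minimal sketch of scanning it for
// one key:
//
//     fn find_aux(buf: &[usize], key: usize) -> Option<usize> {
//         buf.chunks_exact(2)
//             .take_while(|pair| pair[0] != 0) // AT_NULL terminates the vector
//             .find(|pair| pair[0] == key)
//             .map(|pair| pair[1])
//     }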
- #[cfg(any( - target_arch = "arm", - target_arch = "powerpc", - target_arch = "powerpc64", - target_arch = "s390x", - ))] - #[test] - fn auxv_crate() { - let v = auxv(); - if let Ok(hwcap) = getauxval(AT_HWCAP) { - let rt_hwcap = v.expect("failed to find hwcap key").hwcap; - assert_eq!(rt_hwcap, hwcap); - } - - // Targets with AT_HWCAP and AT_HWCAP2: - #[cfg(any( - target_arch = "aarch64", - target_arch = "arm", - target_arch = "powerpc", - target_arch = "powerpc64", - target_arch = "s390x", - ))] - { - if let Ok(hwcap2) = getauxval(AT_HWCAP2) { - let rt_hwcap2 = v.expect("failed to find hwcap2 key").hwcap2; - assert_eq!(rt_hwcap2, hwcap2); - } - } - } - - #[test] - fn auxv_dump() { - if let Ok(auxvec) = auxv() { - println!("{:?}", auxvec); - } else { - println!("both getauxval() and reading /proc/self/auxv failed!"); - } - } - - #[cfg(feature = "std_detect_file_io")] - cfg_if::cfg_if! { - if #[cfg(target_arch = "arm")] { - #[test] - fn linux_rpi3() { - let file = concat!(env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/linux-rpi3.auxv"); - println!("file: {file}"); - let v = auxv_from_file(file).unwrap(); - assert_eq!(v.hwcap, 4174038); - assert_eq!(v.hwcap2, 16); - } - - #[test] - fn linux_macos_vb() { - let file = concat!(env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/macos-virtualbox-linux-x86-4850HQ.auxv"); - println!("file: {file}"); - // The file contains HWCAP but not HWCAP2. In that case, we treat HWCAP2 as zero. - let v = auxv_from_file(file).unwrap(); - assert_eq!(v.hwcap, 126614527); - assert_eq!(v.hwcap2, 0); - } - } else if #[cfg(target_arch = "aarch64")] { - #[cfg(target_endian = "little")] - #[test] - fn linux_artificial_aarch64() { - let file = concat!(env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/linux-artificial-aarch64.auxv"); - println!("file: {file}"); - let v = auxv_from_file(file).unwrap(); - assert_eq!(v.hwcap, 0x0123456789abcdef); - assert_eq!(v.hwcap2, 0x02468ace13579bdf); - } - #[cfg(target_endian = "little")] - #[test] - fn linux_no_hwcap2_aarch64() { - let file = concat!(env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/linux-no-hwcap2-aarch64.auxv"); - println!("file: {file}"); - let v = auxv_from_file(file).unwrap(); - // An absent HWCAP2 is treated as zero, and does not prevent acceptance of HWCAP. - assert_ne!(v.hwcap, 0); - assert_eq!(v.hwcap2, 0); - } - } - } - - #[test] - #[cfg(feature = "std_detect_file_io")] - fn auxv_dump_procfs() { - if let Ok(auxvec) = auxv_from_file("/proc/self/auxv") { - println!("{:?}", auxvec); - } else { - println!("reading /proc/self/auxv failed!"); - } - } - - #[cfg(any( - target_arch = "aarch64", - target_arch = "arm", - target_arch = "powerpc", - target_arch = "powerpc64", - target_arch = "s390x", - ))] - #[test] - #[cfg(feature = "std_detect_file_io")] - fn auxv_crate_procfs() { - if let Ok(procfs_auxv) = auxv_from_file("/proc/self/auxv") { - assert_eq!(auxv().unwrap(), procfs_auxv); - } - } -} +mod tests; diff --git a/library/std_detect/src/detect/os/linux/auxvec/tests.rs b/library/std_detect/src/detect/os/linux/auxvec/tests.rs new file mode 100644 index 0000000000000..536615fa27251 --- /dev/null +++ b/library/std_detect/src/detect/os/linux/auxvec/tests.rs @@ -0,0 +1,109 @@ +use super::*; + +// FIXME: on mips/mips64 getauxval returns 0, and /proc/self/auxv +// does not always contain the AT_HWCAP key under qemu. 
+#[cfg(any( + target_arch = "arm", + target_arch = "powerpc", + target_arch = "powerpc64", + target_arch = "s390x", +))] +#[test] +fn auxv_crate() { + let v = auxv(); + if let Ok(hwcap) = getauxval(AT_HWCAP) { + let rt_hwcap = v.expect("failed to find hwcap key").hwcap; + assert_eq!(rt_hwcap, hwcap); + } + + // Targets with AT_HWCAP and AT_HWCAP2: + #[cfg(any( + target_arch = "aarch64", + target_arch = "arm", + target_arch = "powerpc", + target_arch = "powerpc64", + target_arch = "s390x", + ))] + { + if let Ok(hwcap2) = getauxval(AT_HWCAP2) { + let rt_hwcap2 = v.expect("failed to find hwcap2 key").hwcap2; + assert_eq!(rt_hwcap2, hwcap2); + } + } +} + +#[test] +fn auxv_dump() { + if let Ok(auxvec) = auxv() { + println!("{:?}", auxvec); + } else { + println!("both getauxval() and reading /proc/self/auxv failed!"); + } +} + +#[cfg(feature = "std_detect_file_io")] +cfg_if::cfg_if! { + if #[cfg(target_arch = "arm")] { + // The tests below can be executed under qemu, where we do not have access to the test + // files on disk, so we need to embed them with `include_bytes!`. + #[test] + fn linux_rpi3() { + let auxv = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/linux-rpi3.auxv")); + let v = auxv_from_file_bytes(auxv).unwrap(); + assert_eq!(v.hwcap, 4174038); + assert_eq!(v.hwcap2, 16); + } + + #[test] + fn linux_macos_vb() { + let auxv = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/macos-virtualbox-linux-x86-4850HQ.auxv")); + // The file contains HWCAP but not HWCAP2. In that case, we treat HWCAP2 as zero. + let v = auxv_from_file_bytes(auxv).unwrap(); + assert_eq!(v.hwcap, 126614527); + assert_eq!(v.hwcap2, 0); + } + } else if #[cfg(target_arch = "aarch64")] { + #[cfg(target_endian = "little")] + #[test] + fn linux_artificial_aarch64() { + let auxv = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/linux-artificial-aarch64.auxv")); + let v = auxv_from_file_bytes(auxv).unwrap(); + assert_eq!(v.hwcap, 0x0123456789abcdef); + assert_eq!(v.hwcap2, 0x02468ace13579bdf); + } + #[cfg(target_endian = "little")] + #[test] + fn linux_no_hwcap2_aarch64() { + let auxv = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/linux-no-hwcap2-aarch64.auxv")); + let v = auxv_from_file_bytes(auxv).unwrap(); + // An absent HWCAP2 is treated as zero, and does not prevent acceptance of HWCAP. + assert_ne!(v.hwcap, 0); + assert_eq!(v.hwcap2, 0); + } + } +} + +#[test] +#[cfg(feature = "std_detect_file_io")] +fn auxv_dump_procfs() { + if let Ok(auxvec) = auxv_from_file("/proc/self/auxv") { + println!("{:?}", auxvec); + } else { + println!("reading /proc/self/auxv failed!"); + } +} + +#[cfg(any( + target_arch = "aarch64", + target_arch = "arm", + target_arch = "powerpc", + target_arch = "powerpc64", + target_arch = "s390x", +))] +#[test] +#[cfg(feature = "std_detect_file_io")] +fn auxv_crate_procfs() { + if let Ok(procfs_auxv) = auxv_from_file("/proc/self/auxv") { + assert_eq!(auxv().unwrap(), procfs_auxv); + } +} diff --git a/library/stdarch/crates/std_detect/src/detect/os/linux/loongarch.rs b/library/std_detect/src/detect/os/linux/loongarch.rs similarity index 88% rename from library/stdarch/crates/std_detect/src/detect/os/linux/loongarch.rs rename to library/std_detect/src/detect/os/linux/loongarch.rs index 14cc7a7318354..e97fda11d08fc 100644 --- a/library/stdarch/crates/std_detect/src/detect/os/linux/loongarch.rs +++ b/library/std_detect/src/detect/os/linux/loongarch.rs @@ -1,8 +1,9 @@ //! 
Run-time feature detection for LoongArch on Linux. +use core::arch::asm; + use super::auxvec; use crate::detect::{Feature, bit, cache}; -use core::arch::asm; /// Try to read the features from the auxiliary vector. pub(crate) fn detect_features() -> cache::Initializer { @@ -43,16 +44,8 @@ pub(crate) fn detect_features() -> cache::Initializer { // // [hwcap]: https://github.com/torvalds/linux/blob/master/arch/loongarch/include/uapi/asm/hwcap.h if let Ok(auxv) = auxvec::auxv() { - enable_feature( - &mut value, - Feature::f, - bit::test(cpucfg2, 1) && bit::test(auxv.hwcap, 3), - ); - enable_feature( - &mut value, - Feature::d, - bit::test(cpucfg2, 2) && bit::test(auxv.hwcap, 3), - ); + enable_feature(&mut value, Feature::f, bit::test(cpucfg2, 1) && bit::test(auxv.hwcap, 3)); + enable_feature(&mut value, Feature::d, bit::test(cpucfg2, 2) && bit::test(auxv.hwcap, 3)); enable_feature(&mut value, Feature::lsx, bit::test(auxv.hwcap, 4)); enable_feature(&mut value, Feature::lasx, bit::test(auxv.hwcap, 5)); enable_feature( diff --git a/library/stdarch/crates/std_detect/src/detect/os/linux/mips.rs b/library/std_detect/src/detect/os/linux/mips.rs similarity index 100% rename from library/stdarch/crates/std_detect/src/detect/os/linux/mips.rs rename to library/std_detect/src/detect/os/linux/mips.rs diff --git a/library/stdarch/crates/std_detect/src/detect/os/linux/mod.rs b/library/std_detect/src/detect/os/linux/mod.rs similarity index 82% rename from library/stdarch/crates/std_detect/src/detect/os/linux/mod.rs rename to library/std_detect/src/detect/os/linux/mod.rs index 8c689d0b1f0e8..5ae2aaeab5b8a 100644 --- a/library/stdarch/crates/std_detect/src/detect/os/linux/mod.rs +++ b/library/std_detect/src/detect/os/linux/mod.rs @@ -6,14 +6,16 @@ use alloc::vec::Vec; mod auxvec; #[cfg(feature = "std_detect_file_io")] -fn read_file(path: &str) -> Result, ()> { - let mut path = Vec::from(path.as_bytes()); +fn read_file(orig_path: &str) -> Result, alloc::string::String> { + use alloc::format; + + let mut path = Vec::from(orig_path.as_bytes()); path.push(0); unsafe { let file = libc::open(path.as_ptr() as *const libc::c_char, libc::O_RDONLY); if file == -1 { - return Err(()); + return Err(format!("Cannot open file at {orig_path}")); } let mut data = Vec::new(); @@ -23,7 +25,7 @@ fn read_file(path: &str) -> Result, ()> { match libc::read(file, spare.as_mut_ptr() as *mut _, spare.len()) { -1 => { libc::close(file); - return Err(()); + return Err(format!("Error while reading from file at {orig_path}")); } 0 => break, n => data.set_len(data.len() + n as usize), @@ -51,7 +53,7 @@ cfg_if::cfg_if! 
{ } else if #[cfg(any(target_arch = "powerpc", target_arch = "powerpc64"))] { mod powerpc; pub(crate) use self::powerpc::detect_features; - } else if #[cfg(target_arch = "loongarch64")] { + } else if #[cfg(any(target_arch = "loongarch32", target_arch = "loongarch64"))] { mod loongarch; pub(crate) use self::loongarch::detect_features; } else if #[cfg(target_arch = "s390x")] { diff --git a/library/stdarch/crates/std_detect/src/detect/os/linux/powerpc.rs b/library/std_detect/src/detect/os/linux/powerpc.rs similarity index 100% rename from library/stdarch/crates/std_detect/src/detect/os/linux/powerpc.rs rename to library/std_detect/src/detect/os/linux/powerpc.rs diff --git a/library/stdarch/crates/std_detect/src/detect/os/linux/riscv.rs b/library/std_detect/src/detect/os/linux/riscv.rs similarity index 98% rename from library/stdarch/crates/std_detect/src/detect/os/linux/riscv.rs rename to library/std_detect/src/detect/os/linux/riscv.rs index db20538af9512..dbb3664890e56 100644 --- a/library/stdarch/crates/std_detect/src/detect/os/linux/riscv.rs +++ b/library/std_detect/src/detect/os/linux/riscv.rs @@ -119,16 +119,7 @@ fn _riscv_hwprobe(out: &mut [riscv_hwprobe]) -> bool { cpus: *mut libc::c_ulong, flags: libc::c_uint, ) -> libc::c_long { - unsafe { - libc::syscall( - __NR_riscv_hwprobe, - pairs, - pair_count, - cpu_set_size, - cpus, - flags, - ) - } + unsafe { libc::syscall(__NR_riscv_hwprobe, pairs, pair_count, cpu_set_size, cpus, flags) } } unsafe { __riscv_hwprobe(out.as_mut_ptr(), out.len(), 0, ptr::null_mut(), 0) == 0 } diff --git a/library/stdarch/crates/std_detect/src/detect/os/linux/s390x.rs b/library/std_detect/src/detect/os/linux/s390x.rs similarity index 100% rename from library/stdarch/crates/std_detect/src/detect/os/linux/s390x.rs rename to library/std_detect/src/detect/os/linux/s390x.rs diff --git a/library/stdarch/crates/std_detect/src/detect/os/openbsd/aarch64.rs b/library/std_detect/src/detect/os/openbsd/aarch64.rs similarity index 98% rename from library/stdarch/crates/std_detect/src/detect/os/openbsd/aarch64.rs rename to library/std_detect/src/detect/os/openbsd/aarch64.rs index cfe4ad10ad643..2fae47b05c40a 100644 --- a/library/stdarch/crates/std_detect/src/detect/os/openbsd/aarch64.rs +++ b/library/std_detect/src/detect/os/openbsd/aarch64.rs @@ -4,8 +4,10 @@ //! https://github.com/openbsd/src/commit/d335af936b9d7dd9cf655cae1ce19560c45de6c8 //! https://github.com/golang/go/commit/cd54ef1f61945459486e9eea2f016d99ef1da925 +use core::mem::MaybeUninit; +use core::ptr; + use crate::detect::cache; -use core::{mem::MaybeUninit, ptr}; // Defined in machine/cpu.h. 
// https://github.com/openbsd/src/blob/72ccc03bd11da614f31f7ff76e3f6fce99bc1c79/sys/arch/arm64/include/cpu.h#L25-L40 diff --git a/library/stdarch/crates/std_detect/src/detect/os/other.rs b/library/std_detect/src/detect/os/other.rs similarity index 100% rename from library/stdarch/crates/std_detect/src/detect/os/other.rs rename to library/std_detect/src/detect/os/other.rs diff --git a/library/stdarch/crates/std_detect/src/detect/os/riscv.rs b/library/std_detect/src/detect/os/riscv.rs similarity index 69% rename from library/stdarch/crates/std_detect/src/detect/os/riscv.rs rename to library/std_detect/src/detect/os/riscv.rs index 4c59ede80293e..dc9a4036d86a1 100644 --- a/library/stdarch/crates/std_detect/src/detect/os/riscv.rs +++ b/library/std_detect/src/detect/os/riscv.rs @@ -135,69 +135,5 @@ pub(crate) fn imply_features(mut value: cache::Initializer) -> cache::Initialize } #[cfg(test)] -mod tests { - use super::*; - - #[test] - fn simple_direct() { - let mut value = cache::Initializer::default(); - value.set(Feature::f as u32); - // F (and other extensions with CSRs) -> Zicsr - assert!(imply_features(value).test(Feature::zicsr as u32)); - } - - #[test] - fn simple_indirect() { - let mut value = cache::Initializer::default(); - value.set(Feature::q as u32); - // Q -> D, D -> F, F -> Zicsr - assert!(imply_features(value).test(Feature::zicsr as u32)); - } - - #[test] - fn complex_zcd() { - let mut value = cache::Initializer::default(); - // C & D -> Zcd - value.set(Feature::c as u32); - assert!(!imply_features(value).test(Feature::zcd as u32)); - value.set(Feature::d as u32); - assert!(imply_features(value).test(Feature::zcd as u32)); - } - - #[test] - fn group_simple_forward() { - let mut value = cache::Initializer::default(); - // A -> Zalrsc & Zaamo (forward implication) - value.set(Feature::a as u32); - let value = imply_features(value); - assert!(value.test(Feature::zalrsc as u32)); - assert!(value.test(Feature::zaamo as u32)); - } - - #[test] - fn group_simple_backward() { - let mut value = cache::Initializer::default(); - // Zalrsc & Zaamo -> A (reverse implication) - value.set(Feature::zalrsc as u32); - value.set(Feature::zaamo as u32); - assert!(imply_features(value).test(Feature::a as u32)); - } - - #[test] - fn group_complex_convergence() { - let mut value = cache::Initializer::default(); - // Needs 3 iterations to converge - // (and 4th iteration for convergence checking): - // 1. [Zvksc] -> Zvks & Zvbc - // 2. Zvks -> Zvksed & Zvksh & Zvkb & Zvkt - // 3a. [Zvkned] & [Zvknhb] & [Zvkb] & Zvkt -> {Zvkn} - // 3b. 
Zvkn & Zvbc -> {Zvknc} - value.set(Feature::zvksc as u32); - value.set(Feature::zvkned as u32); - value.set(Feature::zvknhb as u32); - value.set(Feature::zvkb as u32); - let value = imply_features(value); - assert!(value.test(Feature::zvkn as u32)); - assert!(value.test(Feature::zvknc as u32)); - } -} +#[path = "riscv/tests.rs"] +mod tests; diff --git a/library/std_detect/src/detect/os/riscv/tests.rs b/library/std_detect/src/detect/os/riscv/tests.rs new file mode 100644 index 0000000000000..99a81dee05a6c --- /dev/null +++ b/library/std_detect/src/detect/os/riscv/tests.rs @@ -0,0 +1,64 @@ +use super::*; + +#[test] +fn simple_direct() { + let mut value = cache::Initializer::default(); + value.set(Feature::f as u32); + // F (and other extensions with CSRs) -> Zicsr + assert!(imply_features(value).test(Feature::zicsr as u32)); +} + +#[test] +fn simple_indirect() { + let mut value = cache::Initializer::default(); + value.set(Feature::q as u32); + // Q -> D, D -> F, F -> Zicsr + assert!(imply_features(value).test(Feature::zicsr as u32)); +} + +#[test] +fn complex_zcd() { + let mut value = cache::Initializer::default(); + // C & D -> Zcd + value.set(Feature::c as u32); + assert!(!imply_features(value).test(Feature::zcd as u32)); + value.set(Feature::d as u32); + assert!(imply_features(value).test(Feature::zcd as u32)); +} + +#[test] +fn group_simple_forward() { + let mut value = cache::Initializer::default(); + // A -> Zalrsc & Zaamo (forward implication) + value.set(Feature::a as u32); + let value = imply_features(value); + assert!(value.test(Feature::zalrsc as u32)); + assert!(value.test(Feature::zaamo as u32)); +} + +#[test] +fn group_simple_backward() { + let mut value = cache::Initializer::default(); + // Zalrsc & Zaamo -> A (reverse implication) + value.set(Feature::zalrsc as u32); + value.set(Feature::zaamo as u32); + assert!(imply_features(value).test(Feature::a as u32)); +} + +#[test] +fn group_complex_convergence() { + let mut value = cache::Initializer::default(); + // Needs 3 iterations to converge + // (and 4th iteration for convergence checking): + // 1. [Zvksc] -> Zvks & Zvbc + // 2. Zvks -> Zvksed & Zvksh & Zvkb & Zvkt + // 3a. [Zvkned] & [Zvknhb] & [Zvkb] & Zvkt -> {Zvkn} + // 3b. Zvkn & Zvbc -> {Zvknc} + value.set(Feature::zvksc as u32); + value.set(Feature::zvkned as u32); + value.set(Feature::zvknhb as u32); + value.set(Feature::zvkb as u32); + let value = imply_features(value); + assert!(value.test(Feature::zvkn as u32)); + assert!(value.test(Feature::zvknc as u32)); +} diff --git a/library/stdarch/crates/std_detect/src/detect/os/windows/aarch64.rs b/library/std_detect/src/detect/os/windows/aarch64.rs similarity index 100% rename from library/stdarch/crates/std_detect/src/detect/os/windows/aarch64.rs rename to library/std_detect/src/detect/os/windows/aarch64.rs diff --git a/library/stdarch/crates/std_detect/src/detect/os/x86.rs b/library/std_detect/src/detect/os/x86.rs similarity index 94% rename from library/stdarch/crates/std_detect/src/detect/os/x86.rs rename to library/std_detect/src/detect/os/x86.rs index 8565c2f85e246..20f848ab05caf 100644 --- a/library/stdarch/crates/std_detect/src/detect/os/x86.rs +++ b/library/std_detect/src/detect/os/x86.rs @@ -4,7 +4,6 @@ use core::arch::x86::*; #[cfg(target_arch = "x86_64")] use core::arch::x86_64::*; - use core::mem; use crate::detect::{Feature, bit, cache}; @@ -42,12 +41,7 @@ pub(crate) fn detect_features() -> cache::Initializer { // 0x8000_0000]. 
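// [Editorial aside — illustrative only, not part of the patch.] The 12-byte vendor id
// assembled below is the concatenation EBX ++ EDX ++ ECX from cpuid leaf 0; on Intel
// parts it spells "GenuineIntel" and on AMD parts "AuthenticAMD", e.g.:
//
//     let vendor_id: [u8; 12] = *b"GenuineIntel";
//     assert_eq!(&vendor_id[..4], b"Genu");  // EBX bytes
//     assert_eq!(&vendor_id[4..8], b"ineI"); // EDX bytes
//     assert_eq!(&vendor_id[8..], b"ntel");  // ECX bytes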
- The vendor ID is stored in 12 u8 ascii chars, // returned in EBX, EDX, and ECX (in that order): let (max_basic_leaf, vendor_id) = unsafe { - let CpuidResult { - eax: max_basic_leaf, - ebx, - ecx, - edx, - } = __cpuid(0); + let CpuidResult { eax: max_basic_leaf, ebx, ecx, edx } = __cpuid(0); let vendor_id: [[u8; 4]; 3] = [ebx.to_ne_bytes(), edx.to_ne_bytes(), ecx.to_ne_bytes()]; let vendor_id: [u8; 12] = mem::transmute(vendor_id); (max_basic_leaf, vendor_id) @@ -60,11 +54,8 @@ pub(crate) fn detect_features() -> cache::Initializer { // EAX = 1, ECX = 0: Queries "Processor Info and Feature Bits"; // Contains information about most x86 features. - let CpuidResult { - ecx: proc_info_ecx, - edx: proc_info_edx, - .. - } = unsafe { __cpuid(0x0000_0001_u32) }; + let CpuidResult { ecx: proc_info_ecx, edx: proc_info_edx, .. } = + unsafe { __cpuid(0x0000_0001_u32) }; // EAX = 7: Queries "Extended Features"; // Contains information about bmi,bmi2, and avx2 support. @@ -76,11 +67,8 @@ pub(crate) fn detect_features() -> cache::Initializer { extended_features_edx_leaf_1, ) = if max_basic_leaf >= 7 { let CpuidResult { ebx, ecx, edx, .. } = unsafe { __cpuid(0x0000_0007_u32) }; - let CpuidResult { - eax: eax_1, - edx: edx_1, - .. - } = unsafe { __cpuid_count(0x0000_0007_u32, 0x0000_0001_u32) }; + let CpuidResult { eax: eax_1, edx: edx_1, .. } = + unsafe { __cpuid_count(0x0000_0007_u32, 0x0000_0001_u32) }; (ebx, ecx, edx, eax_1, edx_1) } else { (0, 0, 0, 0, 0) // CPUID does not support "Extended Features" @@ -89,10 +77,7 @@ pub(crate) fn detect_features() -> cache::Initializer { // EAX = 0x8000_0000, ECX = 0: Get Highest Extended Function Supported // - EAX returns the max leaf value for extended information, that is, // `cpuid` calls in range [0x8000_0000; u32::MAX]: - let CpuidResult { - eax: extended_max_basic_leaf, - .. - } = unsafe { __cpuid(0x8000_0000_u32) }; + let CpuidResult { eax: extended_max_basic_leaf, .. } = unsafe { __cpuid(0x8000_0000_u32) }; // EAX = 0x8000_0001, ECX=0: Queries "Extended Processor Info and Feature // Bits" @@ -208,10 +193,8 @@ pub(crate) fn detect_features() -> cache::Initializer { // Processor Extended State Enumeration Sub-leaf (EAX = 0DH, // ECX = 1): if max_basic_leaf >= 0xd { - let CpuidResult { - eax: proc_extended_state1_eax, - .. - } = unsafe { __cpuid_count(0xd_u32, 1) }; + let CpuidResult { eax: proc_extended_state1_eax, .. } = + unsafe { __cpuid_count(0xd_u32, 1) }; enable(proc_extended_state1_eax, 0, Feature::xsaveopt); enable(proc_extended_state1_eax, 1, Feature::xsavec); enable(proc_extended_state1_eax, 3, Feature::xsaves); @@ -269,10 +252,8 @@ pub(crate) fn detect_features() -> cache::Initializer { enable(extended_features_edx_leaf_1, 8, Feature::amx_complex); if max_basic_leaf >= 0x1e { - let CpuidResult { - eax: amx_feature_flags_eax, - .. - } = unsafe { __cpuid_count(0x1e_u32, 1) }; + let CpuidResult { eax: amx_feature_flags_eax, .. 
} = + unsafe { __cpuid_count(0x1e_u32, 1) }; enable(amx_feature_flags_eax, 4, Feature::amx_fp8); enable(amx_feature_flags_eax, 5, Feature::amx_transpose); diff --git a/library/stdarch/crates/std_detect/src/detect/test_data/linux-artificial-aarch64.auxv b/library/std_detect/src/detect/test_data/linux-artificial-aarch64.auxv similarity index 100% rename from library/stdarch/crates/std_detect/src/detect/test_data/linux-artificial-aarch64.auxv rename to library/std_detect/src/detect/test_data/linux-artificial-aarch64.auxv diff --git a/library/stdarch/crates/std_detect/src/detect/test_data/linux-empty-hwcap2-aarch64.auxv b/library/std_detect/src/detect/test_data/linux-empty-hwcap2-aarch64.auxv similarity index 100% rename from library/stdarch/crates/std_detect/src/detect/test_data/linux-empty-hwcap2-aarch64.auxv rename to library/std_detect/src/detect/test_data/linux-empty-hwcap2-aarch64.auxv diff --git a/library/stdarch/crates/std_detect/src/detect/test_data/linux-hwcap2-aarch64.auxv b/library/std_detect/src/detect/test_data/linux-hwcap2-aarch64.auxv similarity index 100% rename from library/stdarch/crates/std_detect/src/detect/test_data/linux-hwcap2-aarch64.auxv rename to library/std_detect/src/detect/test_data/linux-hwcap2-aarch64.auxv diff --git a/library/stdarch/crates/std_detect/src/detect/test_data/linux-no-hwcap2-aarch64.auxv b/library/std_detect/src/detect/test_data/linux-no-hwcap2-aarch64.auxv similarity index 100% rename from library/stdarch/crates/std_detect/src/detect/test_data/linux-no-hwcap2-aarch64.auxv rename to library/std_detect/src/detect/test_data/linux-no-hwcap2-aarch64.auxv diff --git a/library/stdarch/crates/std_detect/src/detect/test_data/linux-rpi3.auxv b/library/std_detect/src/detect/test_data/linux-rpi3.auxv similarity index 100% rename from library/stdarch/crates/std_detect/src/detect/test_data/linux-rpi3.auxv rename to library/std_detect/src/detect/test_data/linux-rpi3.auxv diff --git a/library/stdarch/crates/std_detect/src/detect/test_data/macos-virtualbox-linux-x86-4850HQ.auxv b/library/std_detect/src/detect/test_data/macos-virtualbox-linux-x86-4850HQ.auxv similarity index 100% rename from library/stdarch/crates/std_detect/src/detect/test_data/macos-virtualbox-linux-x86-4850HQ.auxv rename to library/std_detect/src/detect/test_data/macos-virtualbox-linux-x86-4850HQ.auxv diff --git a/library/stdarch/crates/std_detect/src/lib.rs b/library/std_detect/src/lib.rs similarity index 100% rename from library/stdarch/crates/std_detect/src/lib.rs rename to library/std_detect/src/lib.rs diff --git a/library/stdarch/crates/std_detect/tests/cpu-detection.rs b/library/std_detect/tests/cpu-detection.rs similarity index 94% rename from library/stdarch/crates/std_detect/tests/cpu-detection.rs rename to library/std_detect/tests/cpu-detection.rs index 7976aedc75850..5ad32d83237ce 100644 --- a/library/stdarch/crates/std_detect/tests/cpu-detection.rs +++ b/library/std_detect/tests/cpu-detection.rs @@ -27,6 +27,16 @@ ), macro_use )] +#[cfg(any( + target_arch = "arm", + target_arch = "aarch64", + target_arch = "arm64ec", + target_arch = "riscv32", + target_arch = "riscv64", + target_arch = "powerpc", + target_arch = "powerpc64", + target_arch = "s390x", +))] extern crate std_detect; #[test] @@ -59,10 +69,7 @@ fn arm_linux() { } #[test] -#[cfg(all( - target_arch = "aarch64", - any(target_os = "linux", target_os = "android") -))] +#[cfg(all(target_arch = "aarch64", any(target_os = "linux", target_os = "android")))] fn aarch64_linux() { println!("asimd: {}", 
is_aarch64_feature_detected!("asimd")); println!("neon: {}", is_aarch64_feature_detected!("neon")); @@ -97,10 +104,7 @@ fn aarch64_linux() { println!("sve2-aes: {}", is_aarch64_feature_detected!("sve2-aes")); println!("sve2-sm4: {}", is_aarch64_feature_detected!("sve2-sm4")); println!("sve2-sha3: {}", is_aarch64_feature_detected!("sve2-sha3")); - println!( - "sve2-bitperm: {}", - is_aarch64_feature_detected!("sve2-bitperm") - ); + println!("sve2-bitperm: {}", is_aarch64_feature_detected!("sve2-bitperm")); println!("frintts: {}", is_aarch64_feature_detected!("frintts")); println!("i8mm: {}", is_aarch64_feature_detected!("i8mm")); println!("f32mm: {}", is_aarch64_feature_detected!("f32mm")); @@ -138,25 +142,13 @@ fn aarch64_linux() { println!("sme-lutv2: {}", is_aarch64_feature_detected!("sme-lutv2")); println!("sme-f8f16: {}", is_aarch64_feature_detected!("sme-f8f16")); println!("sme-f8f32: {}", is_aarch64_feature_detected!("sme-f8f32")); - println!( - "ssve-fp8fma: {}", - is_aarch64_feature_detected!("ssve-fp8fma") - ); - println!( - "ssve-fp8dot4: {}", - is_aarch64_feature_detected!("ssve-fp8dot4") - ); - println!( - "ssve-fp8dot2: {}", - is_aarch64_feature_detected!("ssve-fp8dot2") - ); + println!("ssve-fp8fma: {}", is_aarch64_feature_detected!("ssve-fp8fma")); + println!("ssve-fp8dot4: {}", is_aarch64_feature_detected!("ssve-fp8dot4")); + println!("ssve-fp8dot2: {}", is_aarch64_feature_detected!("ssve-fp8dot2")); } #[test] -#[cfg(all( - any(target_arch = "aarch64", target_arch = "arm64ec"), - target_os = "windows" -))] +#[cfg(all(any(target_arch = "aarch64", target_arch = "arm64ec"), target_os = "windows"))] fn aarch64_windows() { println!("asimd: {:?}", is_aarch64_feature_detected!("asimd")); println!("fp: {:?}", is_aarch64_feature_detected!("fp")); @@ -171,10 +163,7 @@ fn aarch64_windows() { } #[test] -#[cfg(all( - target_arch = "aarch64", - any(target_os = "freebsd", target_os = "openbsd") -))] +#[cfg(all(target_arch = "aarch64", any(target_os = "freebsd", target_os = "openbsd")))] fn aarch64_bsd() { println!("asimd: {:?}", is_aarch64_feature_detected!("asimd")); println!("pmull: {:?}", is_aarch64_feature_detected!("pmull")); @@ -236,14 +225,8 @@ fn riscv_linux() { println!("rv32e: {}", is_riscv_feature_detected!("rv32e")); println!("rv64i: {}", is_riscv_feature_detected!("rv64i")); println!("rv128i: {}", is_riscv_feature_detected!("rv128i")); - println!( - "unaligned-scalar-mem: {}", - is_riscv_feature_detected!("unaligned-scalar-mem") - ); - println!( - "unaligned-vector-mem: {}", - is_riscv_feature_detected!("unaligned-vector-mem") - ); + println!("unaligned-scalar-mem: {}", is_riscv_feature_detected!("unaligned-scalar-mem")); + println!("unaligned-vector-mem: {}", is_riscv_feature_detected!("unaligned-vector-mem")); println!("zicsr: {}", is_riscv_feature_detected!("zicsr")); println!("zicntr: {}", is_riscv_feature_detected!("zicntr")); println!("zihpm: {}", is_riscv_feature_detected!("zihpm")); @@ -336,10 +319,7 @@ fn powerpc_linux() { } #[test] -#[cfg(all( - target_arch = "powerpc64", - any(target_os = "linux", target_os = "freebsd"), -))] +#[cfg(all(target_arch = "powerpc64", any(target_os = "linux", target_os = "freebsd"),))] fn powerpc64_linux_or_freebsd() { println!("altivec: {}", is_powerpc64_feature_detected!("altivec")); println!("vsx: {}", is_powerpc64_feature_detected!("vsx")); diff --git a/library/stdarch/crates/std_detect/tests/macro_trailing_commas.rs b/library/std_detect/tests/macro_trailing_commas.rs similarity index 92% rename from 
library/stdarch/crates/std_detect/tests/macro_trailing_commas.rs rename to library/std_detect/tests/macro_trailing_commas.rs index fa3a23c796817..2fee0abdd575b 100644 --- a/library/stdarch/crates/std_detect/tests/macro_trailing_commas.rs +++ b/library/std_detect/tests/macro_trailing_commas.rs @@ -11,6 +11,7 @@ target_arch = "s390x", target_arch = "riscv32", target_arch = "riscv64", + target_arch = "loongarch32", target_arch = "loongarch64" ), feature(stdarch_internal) @@ -30,7 +31,7 @@ feature(stdarch_riscv_feature_detection) )] #![cfg_attr( - target_arch = "loongarch64", + any(target_arch = "loongarch32", target_arch = "loongarch64"), feature(stdarch_loongarch_feature_detection) )] @@ -45,6 +46,7 @@ target_arch = "s390x", target_arch = "riscv32", target_arch = "riscv64", + target_arch = "loongarch32", target_arch = "loongarch64" ))] #[macro_use] @@ -65,8 +67,8 @@ fn aarch64() { } #[test] -#[cfg(target_arch = "loongarch64")] -fn loongarch64() { +#[cfg(any(target_arch = "loongarch32", target_arch = "loongarch64"))] +fn loongarch() { let _ = is_loongarch_feature_detected!("lsx"); let _ = is_loongarch_feature_detected!("lsx",); } diff --git a/library/stdarch/crates/std_detect/tests/x86-specific.rs b/library/std_detect/tests/x86-specific.rs similarity index 85% rename from library/stdarch/crates/std_detect/tests/x86-specific.rs rename to library/std_detect/tests/x86-specific.rs index d9ec79821baf2..2ed2bb2a99ecd 100644 --- a/library/stdarch/crates/std_detect/tests/x86-specific.rs +++ b/library/std_detect/tests/x86-specific.rs @@ -1,11 +1,6 @@ #![cfg(any(target_arch = "x86", target_arch = "x86_64"))] #![allow(internal_features)] -#![feature( - stdarch_internal, - x86_amx_intrinsics, - xop_target_feature, - movrs_target_feature -)] +#![feature(stdarch_internal, x86_amx_intrinsics, xop_target_feature, movrs_target_feature)] #[macro_use] extern crate std_detect; @@ -40,24 +35,15 @@ fn dump() { println!("avx512vl: {:?}", is_x86_feature_detected!("avx512vl")); println!("avx512_ifma: {:?}", is_x86_feature_detected!("avx512ifma")); println!("avx512vbmi {:?}", is_x86_feature_detected!("avx512vbmi")); - println!( - "avx512_vpopcntdq: {:?}", - is_x86_feature_detected!("avx512vpopcntdq") - ); + println!("avx512_vpopcntdq: {:?}", is_x86_feature_detected!("avx512vpopcntdq")); println!("avx512vbmi2: {:?}", is_x86_feature_detected!("avx512vbmi2")); println!("gfni: {:?}", is_x86_feature_detected!("gfni")); println!("vaes: {:?}", is_x86_feature_detected!("vaes")); println!("vpclmulqdq: {:?}", is_x86_feature_detected!("vpclmulqdq")); println!("avx512vnni: {:?}", is_x86_feature_detected!("avx512vnni")); - println!( - "avx512bitalg: {:?}", - is_x86_feature_detected!("avx512bitalg") - ); + println!("avx512bitalg: {:?}", is_x86_feature_detected!("avx512bitalg")); println!("avx512bf16: {:?}", is_x86_feature_detected!("avx512bf16")); - println!( - "avx512vp2intersect: {:?}", - is_x86_feature_detected!("avx512vp2intersect") - ); + println!("avx512vp2intersect: {:?}", is_x86_feature_detected!("avx512vp2intersect")); println!("avx512fp16: {:?}", is_x86_feature_detected!("avx512fp16")); println!("fma: {:?}", is_x86_feature_detected!("fma")); println!("abm: {:?}", is_x86_feature_detected!("abm")); @@ -77,15 +63,9 @@ fn dump() { println!("movbe: {:?}", is_x86_feature_detected!("movbe")); println!("avxvnni: {:?}", is_x86_feature_detected!("avxvnni")); println!("avxvnniint8: {:?}", is_x86_feature_detected!("avxvnniint8")); - println!( - "avxneconvert: {:?}", - is_x86_feature_detected!("avxneconvert") - ); + 
println!("avxneconvert: {:?}", is_x86_feature_detected!("avxneconvert")); println!("avxifma: {:?}", is_x86_feature_detected!("avxifma")); - println!( - "avxvnniint16: {:?}", - is_x86_feature_detected!("avxvnniint16") - ); + println!("avxvnniint16: {:?}", is_x86_feature_detected!("avxvnniint16")); println!("amx-bf16: {:?}", is_x86_feature_detected!("amx-bf16")); println!("amx-tile: {:?}", is_x86_feature_detected!("amx-tile")); println!("amx-int8: {:?}", is_x86_feature_detected!("amx-int8")); @@ -96,10 +76,7 @@ fn dump() { println!("widekl: {:?}", is_x86_feature_detected!("widekl")); println!("movrs: {:?}", is_x86_feature_detected!("movrs")); println!("amx-fp8: {:?}", is_x86_feature_detected!("amx-fp8")); - println!( - "amx-transpose: {:?}", - is_x86_feature_detected!("amx-transpose") - ); + println!("amx-transpose: {:?}", is_x86_feature_detected!("amx-transpose")); println!("amx-tf32: {:?}", is_x86_feature_detected!("amx-tf32")); println!("amx-avx512: {:?}", is_x86_feature_detected!("amx-avx512")); println!("amx-movrs: {:?}", is_x86_feature_detected!("amx-movrs")); @@ -110,8 +87,5 @@ fn dump() { fn x86_deprecated() { println!("avx512gfni {:?}", is_x86_feature_detected!("avx512gfni")); println!("avx512vaes {:?}", is_x86_feature_detected!("avx512vaes")); - println!( - "avx512vpclmulqdq {:?}", - is_x86_feature_detected!("avx512vpclmulqdq") - ); + println!("avx512vpclmulqdq {:?}", is_x86_feature_detected!("avx512vpclmulqdq")); } diff --git a/library/stdarch/.github/workflows/main.yml b/library/stdarch/.github/workflows/main.yml index 8c6dee16fb618..048ce9864604e 100644 --- a/library/stdarch/.github/workflows/main.yml +++ b/library/stdarch/.github/workflows/main.yml @@ -255,22 +255,34 @@ jobs: env: TARGET: ${{ matrix.target.tuple }} - build-std-detect: + # Check that the generated files agree with the checked-in versions. + check-stdarch-gen: needs: [style] - name: Build std_detect + name: Check stdarch-gen-{arm, loongarch} output runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Install Rust - run: rustup update nightly && rustup default nightly - - run: ./ci/build-std-detect.sh + run: rustup update nightly && rustup default nightly && rustup component add rustfmt + - name: Check arm spec + run: | + cargo run --bin=stdarch-gen-arm --release -- crates/stdarch-gen-arm/spec + git diff --exit-code + - name: Check lsx.spec + run: | + cargo run --bin=stdarch-gen-loongarch --release -- crates/stdarch-gen-loongarch/lsx.spec + git diff --exit-code + - name: Check lasx.spec + run: | + cargo run --bin=stdarch-gen-loongarch --release -- crates/stdarch-gen-loongarch/lasx.spec + git diff --exit-code conclusion: needs: - docs - verify - test - - build-std-detect + - check-stdarch-gen runs-on: ubuntu-latest # We need to ensure this job does *not* get skipped if its dependencies fail, # because a skipped job is considered a success by GitHub. So we have to diff --git a/library/stdarch/.github/workflows/rustc-pull.yml b/library/stdarch/.github/workflows/rustc-pull.yml new file mode 100644 index 0000000000000..6b90d8a500f04 --- /dev/null +++ b/library/stdarch/.github/workflows/rustc-pull.yml @@ -0,0 +1,22 @@ +# Perform a subtree sync (pull) using the josh-sync tool once every few days (or on demand). 
+name: rustc-pull + +on: + workflow_dispatch: + schedule: + # Run at 04:00 UTC every Monday and Thursday + - cron: '0 4 * * 1,4' + +jobs: + pull: + if: github.repository == 'rust-lang/stdarch' + uses: rust-lang/josh-sync/.github/workflows/rustc-pull.yml@main + with: + # https://rust-lang.zulipchat.com/#narrow/channel/208962-t-libs.2Fstdarch/topic/Subtree.20sync.20automation/with/528461782 + zulip-stream-id: 208962 + zulip-bot-email: "stdarch-ci-bot@rust-lang.zulipchat.com" + pr-base-branch: master + branch-name: rustc-pull + secrets: + zulip-api-token: ${{ secrets.ZULIP_API_TOKEN }} + token: ${{ secrets.GITHUB_TOKEN }} diff --git a/library/stdarch/Cargo.lock b/library/stdarch/Cargo.lock index 80f424dfdd8da..21ce304db0d8a 100644 --- a/library/stdarch/Cargo.lock +++ b/library/stdarch/Cargo.lock @@ -73,20 +73,26 @@ version = "0.1.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.104", ] [[package]] name = "autocfg" -version = "1.4.0" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + +[[package]] +name = "bitflags" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" [[package]] name = "cc" -version = "1.2.26" +version = "1.2.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "956a5e21988b87f372569b66183b78babf23ebc2e744b733e4350a752c4dafac" +checksum = "deec109607ca693028562ed836a5f1c4b8bd77755c4e132fc5ce11b0b6211ae7" dependencies = [ "shlex", ] @@ -99,9 +105,9 @@ checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" [[package]] name = "clap" -version = "4.5.40" +version = "4.5.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40b6887a1d8685cebccf115538db5c0efe625ccac9696ad45c409d96566e910f" +checksum = "be92d32e80243a54711e5d7ce823c35c41c9d929dc4ab58e1276f625841aadf9" dependencies = [ "clap_builder", "clap_derive", @@ -109,9 +115,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.40" +version = "4.5.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0c66c08ce9f0c698cbce5c0279d0bb6ac936d8674174fe48f736533b964f59e" +checksum = "707eab41e9622f9139419d573eca0900137718000c517d47da73045f54331c3d" dependencies = [ "anstream", "anstyle", @@ -121,14 +127,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.40" +version = "4.5.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2c7947ae4cc3d851207c1adb5b5e260ff0cca11446b1d6d1423788e442257ce" +checksum = "ef4f52386a59ca4c860f7393bcf8abd8dfd91ecccc0f774635ff68e92eeef491" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.104", ] [[package]] @@ -338,9 +344,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" +checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" dependencies = [ "equivalent", "hashbrown 0.15.4", @@ -403,9 +409,9 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.172" +version = "0.2.174" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" +checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" [[package]] name = "linked-hash-map" @@ -574,18 +580,6 @@ version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "989e6739f80c4ad5b13e0fd7fe89531180375b18520cc8c82080e4dc4035b84f" -[[package]] -name = "rustc-std-workspace-alloc" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9d441c3b2ebf55cebf796bfdc265d67fa09db17b7bb6bd4be75c509e1e8fec3" - -[[package]] -name = "rustc-std-workspace-core" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa9c45b374136f52f2d6311062c7146bff20fec063c3f5d46a410bd937746955" - [[package]] name = "ryu" version = "1.0.20" @@ -624,7 +618,7 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.104", ] [[package]] @@ -685,17 +679,7 @@ version = "0.1.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", -] - -[[package]] -name = "std_detect" -version = "0.1.5" -dependencies = [ - "cfg-if", - "libc", - "rustc-std-workspace-alloc", - "rustc-std-workspace-core", + "syn 2.0.104", ] [[package]] @@ -703,7 +687,6 @@ name = "stdarch-gen-arm" version = "0.1.0" dependencies = [ "itertools", - "lazy_static", "proc-macro2", "quote", "regex", @@ -727,7 +710,6 @@ dependencies = [ "assert-instr-macro", "cc", "cfg-if", - "lazy_static", "rustc-demangle", "simd-test-macro", "wasmprinter", @@ -742,7 +724,7 @@ dependencies = [ "quote", "serde", "serde_json", - "syn 2.0.102", + "syn 2.0.104", ] [[package]] @@ -752,7 +734,6 @@ dependencies = [ "core_arch", "quickcheck", "rand", - "std_detect", ] [[package]] @@ -780,9 +761,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.102" +version = "2.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6397daf94fa90f058bd0fd88429dd9e5738999cca8d701813c80723add80462" +checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" dependencies = [ "proc-macro2", "quote", @@ -834,21 +815,23 @@ checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasmparser" -version = "0.113.3" +version = "0.235.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "286049849b5a5bd09a8773171be96824afabffc7cc3df6caaf33a38db6cd07ae" +checksum = "161296c618fa2d63f6ed5fffd1112937e803cb9ec71b32b01a76321555660917" dependencies = [ - "indexmap 2.9.0", + "bitflags", + "indexmap 2.10.0", "semver", ] [[package]] name = "wasmprinter" -version = "0.2.67" +version = "0.235.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6615a5587149e753bf4b93f90fa3c3f41c88597a7a2da72879afcabeda9648f" +checksum = "75aa8e9076de6b9544e6dab4badada518cca0bf4966d35b131bbd057aed8fa0a" dependencies = [ "anyhow", + "termcolor", "wasmparser", ] @@ -945,20 +928,20 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.25" +version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb" +checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.25" +version = "0.8.26" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" +checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.104", ] diff --git a/library/stdarch/Cargo.toml b/library/stdarch/Cargo.toml index 0db26f31a2d3b..5979096439118 100644 --- a/library/stdarch/Cargo.toml +++ b/library/stdarch/Cargo.toml @@ -5,7 +5,8 @@ members = [ "examples", ] exclude = [ - "crates/wasm-assert-instr-tests" + "crates/wasm-assert-instr-tests", + "rust_programs", ] [profile.release] diff --git a/library/stdarch/README.md b/library/stdarch/README.md index 9a35f4cd6ff58..50905b49e80d3 100644 --- a/library/stdarch/README.md +++ b/library/stdarch/README.md @@ -4,16 +4,8 @@ stdarch - Rust's standard library SIMD components [![Actions Status](https://github.com/rust-lang/stdarch/workflows/CI/badge.svg)](https://github.com/rust-lang/stdarch/actions) -# Crates - -This repository contains two main crates: - -* [`core_arch`](crates/core_arch/README.md) implements `core::arch` - Rust's - core library architecture-specific intrinsics, and +This repository contains the [`core_arch`](crates/core_arch/README.md) crate, which implements `core::arch` - Rust's core library architecture-specific intrinsics. -* [`std_detect`](crates/std_detect/README.md) implements `std::detect` - Rust's - standard library run-time CPU feature detection. - The `std::simd` component now lives in the [`packed_simd_2`](https://github.com/rust-lang/packed_simd) crate. diff --git a/library/stdarch/ci/build-std-detect.sh b/library/stdarch/ci/build-std-detect.sh deleted file mode 100755 index e79a497cc3591..0000000000000 --- a/library/stdarch/ci/build-std-detect.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env bash - -# Build std_detect on non-Linux & non-x86 targets. -# -# In std_detect, non-x86 targets have OS-specific implementations, -# but we can test only Linux in CI. This script builds targets supported -# by std_detect but cannot be tested in CI. - -set -ex -cd "$(dirname "$0")"/.. - -targets=( - # Linux - aarch64-unknown-linux-musl - armv5te-unknown-linux-musleabi - aarch64-unknown-linux-ohos - armv7-unknown-linux-ohos - - # Android - aarch64-linux-android - arm-linux-androideabi - - # FreeBSD - aarch64-unknown-freebsd - armv6-unknown-freebsd - powerpc-unknown-freebsd - powerpc64-unknown-freebsd - - # OpenBSD - aarch64-unknown-openbsd - - # Windows - aarch64-pc-windows-msvc -) - -rustup component add rust-src # for -Z build-std - -cd crates/std_detect -for target in "${targets[@]}"; do - if rustup target add "${target}" &>/dev/null; then - cargo build --target "${target}" - else - # tier 3 targets requires -Z build-std. 
- cargo build -Z build-std="core,alloc" --target "${target}" - fi -done diff --git a/library/stdarch/ci/docker/aarch64-unknown-linux-gnu/Dockerfile b/library/stdarch/ci/docker/aarch64-unknown-linux-gnu/Dockerfile index 17025efffea60..70c06509755cb 100644 --- a/library/stdarch/ci/docker/aarch64-unknown-linux-gnu/Dockerfile +++ b/library/stdarch/ci/docker/aarch64-unknown-linux-gnu/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:25.04 +FROM ubuntu:25.10 RUN apt-get update && apt-get install -y --no-install-recommends \ gcc \ g++ \ @@ -10,7 +10,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ qemu-user \ make \ file \ - clang-19 \ + clang \ lld ENV CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc \ diff --git a/library/stdarch/ci/docker/aarch64_be-unknown-linux-gnu/Dockerfile b/library/stdarch/ci/docker/aarch64_be-unknown-linux-gnu/Dockerfile index 74f770556dbe3..56ddbd990b18b 100644 --- a/library/stdarch/ci/docker/aarch64_be-unknown-linux-gnu/Dockerfile +++ b/library/stdarch/ci/docker/aarch64_be-unknown-linux-gnu/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:25.04 +FROM ubuntu:25.10 RUN apt-get update && apt-get install -y --no-install-recommends \ gcc \ @@ -9,15 +9,15 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ qemu-user \ make \ file \ - clang-19 \ + clang \ curl \ xz-utils \ lld -ENV TOOLCHAIN="arm-gnu-toolchain-14.2.rel1-x86_64-aarch64_be-none-linux-gnu" +ENV TOOLCHAIN="arm-gnu-toolchain-14.3.rel1-x86_64-aarch64_be-none-linux-gnu" # Download the aarch64_be gcc toolchain -RUN curl -L "https://developer.arm.com/-/media/Files/downloads/gnu/14.2.rel1/binrel/${TOOLCHAIN}.tar.xz" -o "${TOOLCHAIN}.tar.xz" +RUN curl -L "https://developer.arm.com/-/media/Files/downloads/gnu/14.3.rel1/binrel/${TOOLCHAIN}.tar.xz" -o "${TOOLCHAIN}.tar.xz" RUN tar -xvf "${TOOLCHAIN}.tar.xz" RUN mkdir /toolchains && mv "./${TOOLCHAIN}" /toolchains diff --git a/library/stdarch/ci/docker/arm-unknown-linux-gnueabihf/Dockerfile b/library/stdarch/ci/docker/arm-unknown-linux-gnueabihf/Dockerfile index 14eaf9f9eef06..6d4ff24828672 100644 --- a/library/stdarch/ci/docker/arm-unknown-linux-gnueabihf/Dockerfile +++ b/library/stdarch/ci/docker/arm-unknown-linux-gnueabihf/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:25.04 +FROM ubuntu:25.10 RUN apt-get update && apt-get install -y --no-install-recommends \ gcc \ ca-certificates \ diff --git a/library/stdarch/ci/docker/armv7-unknown-linux-gnueabihf/Dockerfile b/library/stdarch/ci/docker/armv7-unknown-linux-gnueabihf/Dockerfile index 2086e117d92bc..602249c0ece5a 100644 --- a/library/stdarch/ci/docker/armv7-unknown-linux-gnueabihf/Dockerfile +++ b/library/stdarch/ci/docker/armv7-unknown-linux-gnueabihf/Dockerfile @@ -10,7 +10,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ qemu-user \ make \ file \ - clang-19 \ + clang \ lld ENV CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_LINKER=arm-linux-gnueabihf-gcc \ CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_RUNNER="qemu-arm -cpu max -L /usr/arm-linux-gnueabihf" \ diff --git a/library/stdarch/ci/docker/i586-unknown-linux-gnu/Dockerfile b/library/stdarch/ci/docker/i586-unknown-linux-gnu/Dockerfile index 5a4a22369a805..49d5cecc71ea7 100644 --- a/library/stdarch/ci/docker/i586-unknown-linux-gnu/Dockerfile +++ b/library/stdarch/ci/docker/i586-unknown-linux-gnu/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:25.04 +FROM ubuntu:25.10 RUN apt-get update && apt-get install -y --no-install-recommends \ gcc-multilib \ libc6-dev \ diff --git 
a/library/stdarch/ci/docker/i686-unknown-linux-gnu/Dockerfile b/library/stdarch/ci/docker/i686-unknown-linux-gnu/Dockerfile index 5a4a22369a805..49d5cecc71ea7 100644 --- a/library/stdarch/ci/docker/i686-unknown-linux-gnu/Dockerfile +++ b/library/stdarch/ci/docker/i686-unknown-linux-gnu/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:25.04 +FROM ubuntu:25.10 RUN apt-get update && apt-get install -y --no-install-recommends \ gcc-multilib \ libc6-dev \ diff --git a/library/stdarch/ci/docker/loongarch64-unknown-linux-gnu/Dockerfile b/library/stdarch/ci/docker/loongarch64-unknown-linux-gnu/Dockerfile index 99ccf286f36de..5ab3431ba2724 100644 --- a/library/stdarch/ci/docker/loongarch64-unknown-linux-gnu/Dockerfile +++ b/library/stdarch/ci/docker/loongarch64-unknown-linux-gnu/Dockerfile @@ -1,9 +1,9 @@ -FROM ubuntu:25.04 +FROM ubuntu:25.10 RUN apt-get update && \ apt-get install -y --no-install-recommends \ gcc libc6-dev qemu-user-static ca-certificates \ - gcc-14-loongarch64-linux-gnu libc6-dev-loong64-cross + gcc-loongarch64-linux-gnu libc6-dev-loong64-cross ENV CARGO_TARGET_LOONGARCH64_UNKNOWN_LINUX_GNU_LINKER=loongarch64-linux-gnu-gcc-14 \ diff --git a/library/stdarch/ci/docker/mips-unknown-linux-gnu/Dockerfile b/library/stdarch/ci/docker/mips-unknown-linux-gnu/Dockerfile index f43a3c966331e..f521ba3179379 100644 --- a/library/stdarch/ci/docker/mips-unknown-linux-gnu/Dockerfile +++ b/library/stdarch/ci/docker/mips-unknown-linux-gnu/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:25.04 +FROM ubuntu:25.10 RUN apt-get update && apt-get install -y --no-install-recommends \ gcc libc6-dev qemu-user ca-certificates \ diff --git a/library/stdarch/ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile b/library/stdarch/ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile index 235ac0997b1e3..a8b352881e813 100644 --- a/library/stdarch/ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile +++ b/library/stdarch/ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:25.04 +FROM ubuntu:25.10 RUN apt-get update && apt-get install -y --no-install-recommends \ gcc libc6-dev qemu-user ca-certificates \ diff --git a/library/stdarch/ci/docker/mips64el-unknown-linux-gnuabi64/Dockerfile b/library/stdarch/ci/docker/mips64el-unknown-linux-gnuabi64/Dockerfile index 6041d8911749e..147a3df614554 100644 --- a/library/stdarch/ci/docker/mips64el-unknown-linux-gnuabi64/Dockerfile +++ b/library/stdarch/ci/docker/mips64el-unknown-linux-gnuabi64/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:25.04 +FROM ubuntu:25.10 RUN apt-get update && apt-get install -y --no-install-recommends \ gcc libc6-dev qemu-user ca-certificates \ diff --git a/library/stdarch/ci/docker/mipsel-unknown-linux-musl/Dockerfile b/library/stdarch/ci/docker/mipsel-unknown-linux-musl/Dockerfile index cd38348eeb5c1..ed57511de7b9a 100644 --- a/library/stdarch/ci/docker/mipsel-unknown-linux-musl/Dockerfile +++ b/library/stdarch/ci/docker/mipsel-unknown-linux-musl/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:25.04 +FROM ubuntu:25.10 RUN apt-get update && \ apt-get install -y --no-install-recommends \ diff --git a/library/stdarch/ci/docker/nvptx64-nvidia-cuda/Dockerfile b/library/stdarch/ci/docker/nvptx64-nvidia-cuda/Dockerfile index 5b4869863c705..65cf281b14773 100644 --- a/library/stdarch/ci/docker/nvptx64-nvidia-cuda/Dockerfile +++ b/library/stdarch/ci/docker/nvptx64-nvidia-cuda/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:25.04 +FROM ubuntu:25.10 RUN apt-get update && apt-get install -y --no-install-recommends \ gcc \ libc6-dev \ diff --git 
a/library/stdarch/ci/docker/powerpc-unknown-linux-gnu/Dockerfile b/library/stdarch/ci/docker/powerpc-unknown-linux-gnu/Dockerfile index baad95d57843e..82d05f0b25d1e 100644 --- a/library/stdarch/ci/docker/powerpc-unknown-linux-gnu/Dockerfile +++ b/library/stdarch/ci/docker/powerpc-unknown-linux-gnu/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:25.04 +FROM ubuntu:25.10 RUN apt-get update && apt-get install -y --no-install-recommends \ gcc libc6-dev qemu-user ca-certificates \ diff --git a/library/stdarch/ci/docker/powerpc64-unknown-linux-gnu/Dockerfile b/library/stdarch/ci/docker/powerpc64-unknown-linux-gnu/Dockerfile index dcbcb43513ee6..c5460e1544fa8 100644 --- a/library/stdarch/ci/docker/powerpc64-unknown-linux-gnu/Dockerfile +++ b/library/stdarch/ci/docker/powerpc64-unknown-linux-gnu/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:25.04 +FROM ubuntu:25.10 RUN apt-get update && apt-get install -y --no-install-recommends \ gcc libc6-dev qemu-user ca-certificates \ diff --git a/library/stdarch/ci/docker/powerpc64le-unknown-linux-gnu/Dockerfile b/library/stdarch/ci/docker/powerpc64le-unknown-linux-gnu/Dockerfile index 8dfac0ec1e412..3d3eb8c6f34e7 100644 --- a/library/stdarch/ci/docker/powerpc64le-unknown-linux-gnu/Dockerfile +++ b/library/stdarch/ci/docker/powerpc64le-unknown-linux-gnu/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:25.04 +FROM ubuntu:25.10 RUN apt-get update && apt-get install -y --no-install-recommends \ gcc libc6-dev qemu-user ca-certificates \ diff --git a/library/stdarch/ci/docker/riscv32gc-unknown-linux-gnu/Dockerfile b/library/stdarch/ci/docker/riscv32gc-unknown-linux-gnu/Dockerfile index 81f7b6239af53..fb1718b338f8d 100644 --- a/library/stdarch/ci/docker/riscv32gc-unknown-linux-gnu/Dockerfile +++ b/library/stdarch/ci/docker/riscv32gc-unknown-linux-gnu/Dockerfile @@ -1,10 +1,10 @@ -FROM ubuntu:25.04 +FROM ubuntu:25.10 RUN apt-get update && apt-get install -y --no-install-recommends \ gcc libc6-dev qemu-user ca-certificates \ wget xz-utils make file llvm -ENV VERSION=2025.01.20 +ENV VERSION=2025.07.03 RUN wget "https://github.com/riscv-collab/riscv-gnu-toolchain/releases/download/${VERSION}/riscv32-glibc-ubuntu-24.04-gcc-nightly-${VERSION}-nightly.tar.xz" \ -O riscv-toolchain.tar.xz diff --git a/library/stdarch/ci/docker/riscv64gc-unknown-linux-gnu/Dockerfile b/library/stdarch/ci/docker/riscv64gc-unknown-linux-gnu/Dockerfile index 7ee69e46e2e5b..10316cae6c29b 100644 --- a/library/stdarch/ci/docker/riscv64gc-unknown-linux-gnu/Dockerfile +++ b/library/stdarch/ci/docker/riscv64gc-unknown-linux-gnu/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:25.04 +FROM ubuntu:25.10 RUN apt-get update && apt-get install -y --no-install-recommends \ gcc libc6-dev qemu-user ca-certificates \ diff --git a/library/stdarch/ci/docker/s390x-unknown-linux-gnu/Dockerfile b/library/stdarch/ci/docker/s390x-unknown-linux-gnu/Dockerfile index af02ebcbd169c..04e5464b9c928 100644 --- a/library/stdarch/ci/docker/s390x-unknown-linux-gnu/Dockerfile +++ b/library/stdarch/ci/docker/s390x-unknown-linux-gnu/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:25.04 +FROM ubuntu:25.10 RUN apt-get update && apt-get install -y --no-install-recommends \ curl ca-certificates \ diff --git a/library/stdarch/ci/docker/wasm32-wasip1/Dockerfile b/library/stdarch/ci/docker/wasm32-wasip1/Dockerfile index eeafde79733eb..f618b94291f5d 100644 --- a/library/stdarch/ci/docker/wasm32-wasip1/Dockerfile +++ b/library/stdarch/ci/docker/wasm32-wasip1/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:25.04 +FROM ubuntu:25.10 ENV DEBIAN_FRONTEND=noninteractive RUN apt-get update -y 
&& apt-get install -y --no-install-recommends \ @@ -7,7 +7,9 @@ RUN apt-get update -y && apt-get install -y --no-install-recommends \ xz-utils \ clang -RUN curl -L https://github.com/bytecodealliance/wasmtime/releases/download/v18.0.2/wasmtime-v18.0.2-x86_64-linux.tar.xz | tar xJf - -ENV PATH=$PATH:/wasmtime-v18.0.2-x86_64-linux +ENV VERSION=v34.0.1 + +RUN curl -L https://github.com/bytecodealliance/wasmtime/releases/download/${VERSION}/wasmtime-${VERSION}-x86_64-linux.tar.xz | tar xJf - +ENV PATH=$PATH:/wasmtime-${VERSION}-x86_64-linux ENV CARGO_TARGET_WASM32_WASIP1_RUNNER="wasmtime --dir /checkout/target/wasm32-wasip1/release/deps::." diff --git a/library/stdarch/ci/docker/x86_64-unknown-linux-gnu/Dockerfile b/library/stdarch/ci/docker/x86_64-unknown-linux-gnu/Dockerfile index acde432794e5f..99bfd056fb443 100644 --- a/library/stdarch/ci/docker/x86_64-unknown-linux-gnu/Dockerfile +++ b/library/stdarch/ci/docker/x86_64-unknown-linux-gnu/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:25.04 +FROM ubuntu:25.10 RUN apt-get update && apt-get install -y --no-install-recommends \ gcc \ libc6-dev \ @@ -8,7 +8,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ wget \ xz-utils -RUN wget http://ci-mirrors.rust-lang.org/stdarch/sde-external-9.53.0-2025-03-16-lin.tar.xz -O sde.tar.xz +RUN wget http://ci-mirrors.rust-lang.org/stdarch/sde-external-9.58.0-2025-06-16-lin.tar.xz -O sde.tar.xz RUN mkdir intel-sde RUN tar -xJf sde.tar.xz --strip-components=1 -C intel-sde ENV CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUNNER="/intel-sde/sde64 \ diff --git a/library/stdarch/ci/docker/x86_64-unknown-linux-gnu/cpuid.def b/library/stdarch/ci/docker/x86_64-unknown-linux-gnu/cpuid.def index 4cce9d7a3c002..95cef6199311b 100644 --- a/library/stdarch/ci/docker/x86_64-unknown-linux-gnu/cpuid.def +++ b/library/stdarch/ci/docker/x86_64-unknown-linux-gnu/cpuid.def @@ -1,41 +1,35 @@ -# Copyright (C) 2024-2024 Intel Corporation. -# +# Copyright (C) 2017-2025 Intel Corporation. +# # This software and the related documents are Intel copyrighted materials, and your # use of them is governed by the express license under which they were provided to # you ("License"). Unless the License provides otherwise, you may not use, modify, # copy, publish, distribute, disclose or transmit this software or the related # documents without Intel's prior written permission. -# +# # This software and the related documents are provided as is, with no express or # implied warranties, other than those that are expressly stated in the License. # -# The CPUID information in this file is for software enabling purposes only and -# it is not a full and accurate representation of the CPU under development which -# it represents. -# The CPUID information in this file is not a guarantee of the availability of -# features or characteristics in the final released CPU. 
-# # CPUID_VERSION = 1.0 # Input => Output # EAX ECX => EAX EBX ECX EDX 00000000 ******** => 00000024 68747541 444d4163 69746e65 -00000001 ******** => 000d06f0 00100800 7ffaf3ff bfebfbff +00000001 ******** => 00400f10 00100800 7ffaf3ff bfebfbff 00000002 ******** => 76035a01 00f0b6ff 00000000 00c10000 00000003 ******** => 00000000 00000000 00000000 00000000 -00000004 00000000 => 7c004121 02c0003f 0000003f 00000000 #Deterministic Cache +00000004 00000000 => 7c004121 01c0003f 0000003f 00000000 #Deterministic Cache 00000004 00000001 => 7c004122 01c0003f 0000003f 00000000 -00000004 00000002 => 7c004143 03c0003f 000007ff 00000000 -00000004 00000003 => 7c0fc163 04c0003f 0005ffff 00000004 +00000004 00000002 => 7c004143 03c0003f 000003ff 00000000 +00000004 00000003 => 7c0fc163 0280003f 0000dfff 00000004 00000004 00000004 => 00000000 00000000 00000000 00000000 00000005 ******** => 00000040 00000040 00000003 00042120 #MONITOR/MWAIT 00000006 ******** => 00000077 00000002 00000001 00000000 #Thermal and Power -00000007 00000000 => 00000001 f3bfbfbf bbc05ffe 03d55130 #Extended Features -00000007 00000001 => 88ee00bf 00000002 00000000 1d29cd3e +00000007 00000000 => 00000001 f3bfbfbf bac05ffe 03d54130 #Extended Features +00000007 00000001 => 98ee00bf 00000002 00000020 1d29cd3e 00000008 ******** => 00000000 00000000 00000000 00000000 00000009 ******** => 00000000 00000000 00000000 00000000 #Direct Cache 0000000a ******** => 07300403 00000000 00000000 00000603 -0000000b 00000000 => 00000001 00000002 00000100 0000001e #Extended Topology -0000000b 00000001 => 00000004 00000002 00000201 0000001e +0000000b 00000000 => 00000001 00000002 00000100 00000000 #Extended Topology +0000000b 00000001 => 00000004 00000002 00000201 00000000 0000000c ******** => 00000000 00000000 00000000 00000000 0000000d 00000000 => 000e02e7 00002b00 00002b00 00000000 #xcr0 0000000d 00000001 => 0000001f 00000240 00000100 00000000 @@ -52,10 +46,8 @@ 0000001d 00000001 => 04002000 00080040 00000010 00000000 #AMX Palette1 0000001e 00000000 => 00000001 00004010 00000000 00000000 #AMX Tmul 0000001e 00000001 => 000001ff 00000000 00000000 00000000 -0000001f 00000000 => 00000001 00000002 00000100 0000001e -0000001f 00000001 => 00000007 00000070 00000201 0000001e -0000001f 00000002 => 00000000 00000000 00000002 0000001e -00000024 00000000 => 00000000 00070002 00000000 00000000 #AVX10 +00000024 00000000 => 00000001 00070002 00000000 00000000 #AVX10 +00000024 00000001 => 00000000 00000000 00000004 00000000 80000000 ******** => 80000008 00000000 00000000 00000000 80000001 ******** => 00000000 00000000 00200961 2c100000 80000002 ******** => 00000000 00000000 00000000 00000000 @@ -66,6 +58,6 @@ 80000007 ******** => 00000000 00000000 00000000 00000100 80000008 ******** => 00003028 00000200 00000200 00000000 -# This file was copied from intel-sde/misc/cpuid/dmr/cpuid.def, and modified to +# This file was copied from intel-sde/misc/cpuid/future/cpuid.def, and modified to # use "AuthenticAMD" as the vendor and the support for `XOP`, `SSE4a`, `TBM`, # `AVX512_VP2INTERSECT` and the VEX variants of AVX512 was added in the CPUID. 
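The `00000007` entries in the cpuid.def above describe the same Extended Features leaf that runtime feature detection reads on real hardware, and that the feature-dump example earlier in this series prints via `is_x86_feature_detected!`. A minimal sketch of reading that leaf directly, assuming an x86_64 host (the helper name `dump_leaf7` is made up for illustration and is not part of the patch):

    #[cfg(target_arch = "x86_64")]
    fn dump_leaf7() {
        use std::arch::x86_64::__cpuid_count;
        // SAFETY: the `cpuid` instruction is available on every x86_64 CPU.
        let leaf7 = unsafe { __cpuid_count(0x7, 0) };
        println!(
            "CPUID.(EAX=07H,ECX=0): EBX={:08x} ECX={:08x} EDX={:08x}",
            leaf7.ebx, leaf7.ecx, leaf7.edx
        );
        // CPUID.(EAX=07H,ECX=0):EBX bit 5 is AVX2. `is_x86_feature_detected!("avx2")`
        // performs the equivalent check plus the XCR0/OS-support checks that raw
        // CPUID alone does not cover.
        println!("avx2 (raw CPUID bit): {}", ((leaf7.ebx >> 5) & 1) == 1);
    }

Under the Intel SDE used in the x86_64 CI image, the emulator answers these queries from cpuid.def, which is why the feature bits toggled in the hunk above directly change what the dump example and the detection macros report inside that container.
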
diff --git a/library/stdarch/ci/dox.sh b/library/stdarch/ci/dox.sh index 910265fad84de..94d76d4304722 100755 --- a/library/stdarch/ci/dox.sh +++ b/library/stdarch/ci/dox.sh @@ -16,10 +16,7 @@ dox() { cargo clean --target "${1}" cargo build --verbose --target "${1}" --manifest-path crates/core_arch/Cargo.toml - cargo build --verbose --target "${1}" --manifest-path crates/std_detect/Cargo.toml - cargo doc --verbose --target "${1}" --manifest-path crates/core_arch/Cargo.toml - cargo doc --verbose --target "${1}" --manifest-path crates/std_detect/Cargo.toml } if [ -z "$1" ]; then diff --git a/library/stdarch/ci/run-docker.sh b/library/stdarch/ci/run-docker.sh index 657353004dcb5..d7aa50a8c96f7 100755 --- a/library/stdarch/ci/run-docker.sh +++ b/library/stdarch/ci/run-docker.sh @@ -37,7 +37,6 @@ run() { --env NORUN \ --env RUSTFLAGS \ --env CARGO_UNSTABLE_BUILD_STD \ - --env RUST_STD_DETECT_UNSTABLE \ --volume "${HOME}/.cargo":/cargo \ --volume "$(rustc --print sysroot)":/rust:ro \ --volume "$(pwd)":/checkout:ro \ diff --git a/library/stdarch/ci/run.sh b/library/stdarch/ci/run.sh index 8eadb9285c992..aa4479395d5b6 100755 --- a/library/stdarch/ci/run.sh +++ b/library/stdarch/ci/run.sh @@ -78,20 +78,12 @@ cargo_test() { } CORE_ARCH="--manifest-path=crates/core_arch/Cargo.toml" -STD_DETECT="--manifest-path=crates/std_detect/Cargo.toml" STDARCH_EXAMPLES="--manifest-path=examples/Cargo.toml" INTRINSIC_TEST="--manifest-path=crates/intrinsic-test/Cargo.toml" cargo_test "${CORE_ARCH} ${PROFILE}" if [ "$NOSTD" != "1" ]; then - cargo_test "${STD_DETECT} ${PROFILE}" - - cargo_test "${STD_DETECT} --no-default-features" - cargo_test "${STD_DETECT} --no-default-features --features=std_detect_file_io" - cargo_test "${STD_DETECT} --no-default-features --features=std_detect_dlsym_getauxval" - cargo_test "${STD_DETECT} --no-default-features --features=std_detect_dlsym_getauxval,std_detect_file_io" - cargo_test "${STDARCH_EXAMPLES} ${PROFILE}" fi @@ -139,26 +131,26 @@ case ${TARGET} in cargo_test "${PROFILE}" ;; - # Setup aarch64 & armv7 specific variables, the runner, along with some + # Setup aarch64 & armv7 specific variables, the runner, along with some # tests to skip aarch64-unknown-linux-gnu*) TEST_CPPFLAGS="-fuse-ld=lld -I/usr/aarch64-linux-gnu/include/ -I/usr/aarch64-linux-gnu/include/c++/9/aarch64-linux-gnu/" TEST_SKIP_INTRINSICS=crates/intrinsic-test/missing_aarch64.txt - TEST_CXX_COMPILER="clang++-19" + TEST_CXX_COMPILER="clang++" TEST_RUNNER="${CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUNNER}" ;; aarch64_be-unknown-linux-gnu*) TEST_CPPFLAGS="-fuse-ld=lld" TEST_SKIP_INTRINSICS=crates/intrinsic-test/missing_aarch64.txt - TEST_CXX_COMPILER="clang++-19" + TEST_CXX_COMPILER="clang++" TEST_RUNNER="${CARGO_TARGET_AARCH64_BE_UNKNOWN_LINUX_GNU_RUNNER}" ;; armv7-unknown-linux-gnueabihf*) TEST_CPPFLAGS="-fuse-ld=lld -I/usr/arm-linux-gnueabihf/include/ -I/usr/arm-linux-gnueabihf/include/c++/9/arm-linux-gnueabihf/" TEST_SKIP_INTRINSICS=crates/intrinsic-test/missing_arm.txt - TEST_CXX_COMPILER="clang++-19" + TEST_CXX_COMPILER="clang++" TEST_RUNNER="${CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_RUNNER}" ;; *) diff --git a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs index 32f144bc7adc4..bc4c438038dc5 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs @@ -51,7 +51,7 @@ pub fn __crc32d(crc: u32, data: u64) -> u32 { #[inline] 
#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))] pub fn vabal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t { unsafe { let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); @@ -66,7 +66,7 @@ pub fn vabal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t { #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))] pub fn vabal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { unsafe { let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); @@ -81,7 +81,7 @@ pub fn vabal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))] pub fn vabal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { unsafe { let d: int32x2_t = simd_shuffle!(b, b, [2, 3]); @@ -96,7 +96,7 @@ pub fn vabal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))] pub fn vabal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t { unsafe { let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); @@ -110,7 +110,7 @@ pub fn vabal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))] pub fn vabal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t { unsafe { let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); @@ -124,7 +124,7 @@ pub fn vabal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))] pub fn vabal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t { unsafe { let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]); @@ -197,7 +197,7 @@ pub fn vabdh_f16(a: f16, b: f16) -> f16 { #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sabdl))] +#[cfg_attr(test, assert_instr(sabdl2))] pub fn vabdl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { unsafe { let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); @@ -211,7 +211,7 @@ pub fn vabdl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sabdl))] +#[cfg_attr(test, assert_instr(sabdl2))] pub fn vabdl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { unsafe { 
let c: int32x2_t = simd_shuffle!(a, a, [2, 3]); @@ -225,7 +225,7 @@ pub fn vabdl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(sabdl))] +#[cfg_attr(test, assert_instr(sabdl2))] pub fn vabdl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { unsafe { let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); @@ -238,7 +238,7 @@ pub fn vabdl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u8)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uabdl))] +#[cfg_attr(test, assert_instr(uabdl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub fn vabdl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { unsafe { @@ -251,7 +251,7 @@ pub fn vabdl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u16)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uabdl))] +#[cfg_attr(test, assert_instr(uabdl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub fn vabdl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { unsafe { @@ -264,7 +264,7 @@ pub fn vabdl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(uabdl))] +#[cfg_attr(test, assert_instr(uabdl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub fn vabdl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { unsafe { @@ -298,46 +298,40 @@ pub fn vabsq_f64(a: float64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(abs))] pub fn vabs_s64(a: int64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.abs.v1i64" - )] - fn _vabs_s64(a: int64x1_t) -> int64x1_t; + unsafe { + let neg: int64x1_t = simd_neg(a); + let mask: int64x1_t = simd_ge(a, neg); + simd_select(mask, a, neg) } - unsafe { _vabs_s64(a) } } #[doc = "Absolute Value (wrapping)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsd_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s64)"] #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(abs))] -pub fn vabsd_s64(a: i64) -> i64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.abs.i64" - )] - fn _vabsd_s64(a: i64) -> i64; +pub fn vabsq_s64(a: int64x2_t) -> int64x2_t { + unsafe { + let neg: int64x2_t = simd_neg(a); + let mask: int64x2_t = simd_ge(a, neg); + simd_select(mask, a, neg) } - unsafe { _vabsd_s64(a) } } #[doc = "Absolute Value (wrapping)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsd_s64)"] #[inline] #[target_feature(enable = "neon")] #[stable(feature = 
"neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(abs))] -pub fn vabsq_s64(a: int64x2_t) -> int64x2_t { +pub fn vabsd_s64(a: i64) -> i64 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.abs.v2i64" + link_name = "llvm.aarch64.neon.abs.i64" )] - fn _vabsq_s64(a: int64x2_t) -> int64x2_t; + fn _vabsd_s64(a: i64) -> i64; } - unsafe { _vabsq_s64(a) } + unsafe { _vabsd_s64(a) } } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_s64)"] @@ -604,14 +598,7 @@ pub fn vaddvq_f64(a: float64x2_t) -> f64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] pub fn vaddv_s32(a: int32x2_t) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddv.i32.v2i32" - )] - fn _vaddv_s32(a: int32x2_t) -> i32; - } - unsafe { _vaddv_s32(a) } + unsafe { simd_reduce_add_unordered(a) } } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s8)"] @@ -620,14 +607,7 @@ pub fn vaddv_s32(a: int32x2_t) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] pub fn vaddv_s8(a: int8x8_t) -> i8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddv.i8.v8i8" - )] - fn _vaddv_s8(a: int8x8_t) -> i8; - } - unsafe { _vaddv_s8(a) } + unsafe { simd_reduce_add_unordered(a) } } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s8)"] @@ -636,14 +616,7 @@ pub fn vaddv_s8(a: int8x8_t) -> i8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] pub fn vaddvq_s8(a: int8x16_t) -> i8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddv.i8.v16i8" - )] - fn _vaddvq_s8(a: int8x16_t) -> i8; - } - unsafe { _vaddvq_s8(a) } + unsafe { simd_reduce_add_unordered(a) } } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s16)"] @@ -652,14 +625,7 @@ pub fn vaddvq_s8(a: int8x16_t) -> i8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] pub fn vaddv_s16(a: int16x4_t) -> i16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddv.i16.v4i16" - )] - fn _vaddv_s16(a: int16x4_t) -> i16; - } - unsafe { _vaddv_s16(a) } + unsafe { simd_reduce_add_unordered(a) } } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s16)"] @@ -668,14 +634,7 @@ pub fn vaddv_s16(a: int16x4_t) -> i16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] pub fn vaddvq_s16(a: int16x8_t) -> i16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddv.i16.v8i16" - )] - fn _vaddvq_s16(a: int16x8_t) -> i16; - } - unsafe { _vaddvq_s16(a) } + unsafe { simd_reduce_add_unordered(a) } } #[doc = "Add across vector"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s32)"] @@ -684,14 +643,7 @@ pub fn vaddvq_s16(a: int16x8_t) -> i16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] pub fn vaddvq_s32(a: int32x4_t) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddv.i32.v4i32" - )] - fn _vaddvq_s32(a: int32x4_t) -> i32; - } - unsafe { _vaddvq_s32(a) } + unsafe { simd_reduce_add_unordered(a) } } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u32)"] @@ -700,14 +652,7 @@ pub fn vaddvq_s32(a: int32x4_t) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] pub fn vaddv_u32(a: uint32x2_t) -> u32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddv.i32.v2i32" - )] - fn _vaddv_u32(a: uint32x2_t) -> u32; - } - unsafe { _vaddv_u32(a) } + unsafe { simd_reduce_add_unordered(a) } } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u8)"] @@ -716,14 +661,7 @@ pub fn vaddv_u32(a: uint32x2_t) -> u32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] pub fn vaddv_u8(a: uint8x8_t) -> u8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddv.i8.v8i8" - )] - fn _vaddv_u8(a: uint8x8_t) -> u8; - } - unsafe { _vaddv_u8(a) } + unsafe { simd_reduce_add_unordered(a) } } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u8)"] @@ -732,14 +670,7 @@ pub fn vaddv_u8(a: uint8x8_t) -> u8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] pub fn vaddvq_u8(a: uint8x16_t) -> u8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddv.i8.v16i8" - )] - fn _vaddvq_u8(a: uint8x16_t) -> u8; - } - unsafe { _vaddvq_u8(a) } + unsafe { simd_reduce_add_unordered(a) } } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u16)"] @@ -748,14 +679,7 @@ pub fn vaddvq_u8(a: uint8x16_t) -> u8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] pub fn vaddv_u16(a: uint16x4_t) -> u16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddv.i16.v4i16" - )] - fn _vaddv_u16(a: uint16x4_t) -> u16; - } - unsafe { _vaddv_u16(a) } + unsafe { simd_reduce_add_unordered(a) } } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u16)"] @@ -764,14 +688,7 @@ pub fn vaddv_u16(a: uint16x4_t) -> u16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] pub fn vaddvq_u16(a: uint16x8_t) -> u16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddv.i16.v8i16" - )] - fn _vaddvq_u16(a: uint16x8_t) -> u16; - } - unsafe { 
_vaddvq_u16(a) } + unsafe { simd_reduce_add_unordered(a) } } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u32)"] @@ -780,14 +697,7 @@ pub fn vaddvq_u16(a: uint16x8_t) -> u16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] pub fn vaddvq_u32(a: uint32x4_t) -> u32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddv.i32.v4i32" - )] - fn _vaddvq_u32(a: uint32x4_t) -> u32; - } - unsafe { _vaddvq_u32(a) } + unsafe { simd_reduce_add_unordered(a) } } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s64)"] @@ -796,14 +706,7 @@ pub fn vaddvq_u32(a: uint32x4_t) -> u32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] pub fn vaddvq_s64(a: int64x2_t) -> i64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddv.i64.v2i64" - )] - fn _vaddvq_s64(a: int64x2_t) -> i64; - } - unsafe { _vaddvq_s64(a) } + unsafe { simd_reduce_add_unordered(a) } } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u64)"] @@ -812,14 +715,7 @@ pub fn vaddvq_s64(a: int64x2_t) -> i64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] pub fn vaddvq_u64(a: uint64x2_t) -> u64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddv.i64.v2i64" - )] - fn _vaddvq_u64(a: uint64x2_t) -> u64; - } - unsafe { _vaddvq_u64(a) } + unsafe { simd_reduce_add_unordered(a) } } #[doc = "Multi-vector floating-point absolute maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamax_f32)"] @@ -7281,7 +7177,7 @@ pub fn vcvt_high_f32_f16(a: float16x8_t) -> float32x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f64)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtn))] +#[cfg_attr(test, assert_instr(fcvtn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub fn vcvt_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t { unsafe { simd_shuffle!(a, simd_cast(b), [0, 1, 2, 3]) } @@ -7290,7 +7186,7 @@ pub fn vcvt_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f64_f32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtl))] +#[cfg_attr(test, assert_instr(fcvtl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub fn vcvt_high_f64_f32(a: float32x4_t) -> float64x2_t { unsafe { @@ -9390,7 +9286,7 @@ pub fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_high_f32_f64)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(fcvtxn))] +#[cfg_attr(test, assert_instr(fcvtxn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t { unsafe { simd_shuffle!(a, vcvtx_f32_f64(b), [0, 
1, 2, 3]) } @@ -13229,14 +13125,7 @@ pub fn vmaxh_f16(a: f16, b: f16) -> f16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxnm))] pub fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnm.v1f64" - )] - fn _vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; - } - unsafe { _vmaxnm_f64(a, b) } + unsafe { simd_fmax(a, b) } } #[doc = "Floating-point Maximum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f64)"] @@ -13245,14 +13134,7 @@ pub fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxnm))] pub fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnm.v2f64" - )] - fn _vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - } - unsafe { _vmaxnmq_f64(a, b) } + unsafe { simd_fmax(a, b) } } #[doc = "Floating-point Maximum Number"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmh_f16)"] @@ -13261,14 +13143,7 @@ pub fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg_attr(test, assert_instr(fmaxnm))] pub fn vmaxnmh_f16(a: f16, b: f16) -> f16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnm.f16" - )] - fn _vmaxnmh_f16(a: f16, b: f16) -> f16; - } - unsafe { _vmaxnmh_f16(a, b) } + f16::max(a, b) } #[doc = "Floating-point maximum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f16)"] @@ -13277,14 +13152,7 @@ pub fn vmaxnmh_f16(a: f16, b: f16) -> f16 { #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg_attr(test, assert_instr(fmaxnmv))] pub fn vmaxnmv_f16(a: float16x4_t) -> f16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnmv.f16.v4f16" - )] - fn _vmaxnmv_f16(a: float16x4_t) -> f16; - } - unsafe { _vmaxnmv_f16(a) } + unsafe { simd_reduce_max(a) } } #[doc = "Floating-point maximum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f16)"] @@ -13293,14 +13161,7 @@ pub fn vmaxnmv_f16(a: float16x4_t) -> f16 { #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg_attr(test, assert_instr(fmaxnmv))] pub fn vmaxnmvq_f16(a: float16x8_t) -> f16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnmv.f16.v8f16" - )] - fn _vmaxnmvq_f16(a: float16x8_t) -> f16; - } - unsafe { _vmaxnmvq_f16(a) } + unsafe { simd_reduce_max(a) } } #[doc = "Floating-point maximum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f32)"] @@ -13309,14 +13170,7 @@ pub fn vmaxnmvq_f16(a: float16x8_t) -> f16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxnmp))] pub fn vmaxnmv_f32(a: float32x2_t) 
-> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32" - )] - fn _vmaxnmv_f32(a: float32x2_t) -> f32; - } - unsafe { _vmaxnmv_f32(a) } + unsafe { simd_reduce_max(a) } } #[doc = "Floating-point maximum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f64)"] @@ -13325,14 +13179,7 @@ pub fn vmaxnmv_f32(a: float32x2_t) -> f32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxnmp))] pub fn vmaxnmvq_f64(a: float64x2_t) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64" - )] - fn _vmaxnmvq_f64(a: float64x2_t) -> f64; - } - unsafe { _vmaxnmvq_f64(a) } + unsafe { simd_reduce_max(a) } } #[doc = "Floating-point maximum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f32)"] @@ -13341,14 +13188,7 @@ pub fn vmaxnmvq_f64(a: float64x2_t) -> f64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxnmv))] pub fn vmaxnmvq_f32(a: float32x4_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnmv.f32.v4f32" - )] - fn _vmaxnmvq_f32(a: float32x4_t) -> f32; - } - unsafe { _vmaxnmvq_f32(a) } + unsafe { simd_reduce_max(a) } } #[doc = "Floating-point maximum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f16)"] @@ -13437,14 +13277,7 @@ pub fn vmaxvq_f64(a: float64x2_t) -> f64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxv))] pub fn vmaxv_s8(a: int8x8_t) -> i8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxv.i8.v8i8" - )] - fn _vmaxv_s8(a: int8x8_t) -> i8; - } - unsafe { _vmaxv_s8(a) } + unsafe { simd_reduce_max(a) } } #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s8)"] @@ -13453,14 +13286,7 @@ pub fn vmaxv_s8(a: int8x8_t) -> i8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxv))] pub fn vmaxvq_s8(a: int8x16_t) -> i8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxv.i8.v16i8" - )] - fn _vmaxvq_s8(a: int8x16_t) -> i8; - } - unsafe { _vmaxvq_s8(a) } + unsafe { simd_reduce_max(a) } } #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s16)"] @@ -13469,14 +13295,7 @@ pub fn vmaxvq_s8(a: int8x16_t) -> i8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxv))] pub fn vmaxv_s16(a: int16x4_t) -> i16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxv.i16.v4i16" - )] - fn _vmaxv_s16(a: int16x4_t) -> i16; - } - unsafe { _vmaxv_s16(a) } + unsafe { simd_reduce_max(a) } } #[doc = "Horizontal vector max."] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s16)"] @@ -13485,14 +13304,7 @@ pub fn vmaxv_s16(a: int16x4_t) -> i16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxv))] pub fn vmaxvq_s16(a: int16x8_t) -> i16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxv.i16.v8i16" - )] - fn _vmaxvq_s16(a: int16x8_t) -> i16; - } - unsafe { _vmaxvq_s16(a) } + unsafe { simd_reduce_max(a) } } #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s32)"] @@ -13501,14 +13313,7 @@ pub fn vmaxvq_s16(a: int16x8_t) -> i16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxp))] pub fn vmaxv_s32(a: int32x2_t) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxv.i32.v2i32" - )] - fn _vmaxv_s32(a: int32x2_t) -> i32; - } - unsafe { _vmaxv_s32(a) } + unsafe { simd_reduce_max(a) } } #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s32)"] @@ -13517,14 +13322,7 @@ pub fn vmaxv_s32(a: int32x2_t) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxv))] pub fn vmaxvq_s32(a: int32x4_t) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxv.i32.v4i32" - )] - fn _vmaxvq_s32(a: int32x4_t) -> i32; - } - unsafe { _vmaxvq_s32(a) } + unsafe { simd_reduce_max(a) } } #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u8)"] @@ -13533,14 +13331,7 @@ pub fn vmaxvq_s32(a: int32x4_t) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxv))] pub fn vmaxv_u8(a: uint8x8_t) -> u8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxv.i8.v8i8" - )] - fn _vmaxv_u8(a: uint8x8_t) -> u8; - } - unsafe { _vmaxv_u8(a) } + unsafe { simd_reduce_max(a) } } #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u8)"] @@ -13549,14 +13340,7 @@ pub fn vmaxv_u8(a: uint8x8_t) -> u8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxv))] pub fn vmaxvq_u8(a: uint8x16_t) -> u8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxv.i8.v16i8" - )] - fn _vmaxvq_u8(a: uint8x16_t) -> u8; - } - unsafe { _vmaxvq_u8(a) } + unsafe { simd_reduce_max(a) } } #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u16)"] @@ -13565,14 +13349,7 @@ pub fn vmaxvq_u8(a: uint8x16_t) -> u8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxv))] pub fn vmaxv_u16(a: uint16x4_t) -> u16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxv.i16.v4i16" - )] - fn _vmaxv_u16(a: uint16x4_t) -> u16; - } - unsafe { 
_vmaxv_u16(a) } + unsafe { simd_reduce_max(a) } } #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u16)"] @@ -13581,14 +13358,7 @@ pub fn vmaxv_u16(a: uint16x4_t) -> u16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxv))] pub fn vmaxvq_u16(a: uint16x8_t) -> u16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxv.i16.v8i16" - )] - fn _vmaxvq_u16(a: uint16x8_t) -> u16; - } - unsafe { _vmaxvq_u16(a) } + unsafe { simd_reduce_max(a) } } #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u32)"] @@ -13597,14 +13367,7 @@ pub fn vmaxvq_u16(a: uint16x8_t) -> u16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxp))] pub fn vmaxv_u32(a: uint32x2_t) -> u32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxv.i32.v2i32" - )] - fn _vmaxv_u32(a: uint32x2_t) -> u32; - } - unsafe { _vmaxv_u32(a) } + unsafe { simd_reduce_max(a) } } #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u32)"] @@ -13613,14 +13376,7 @@ pub fn vmaxv_u32(a: uint32x2_t) -> u32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxv))] pub fn vmaxvq_u32(a: uint32x4_t) -> u32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxv.i32.v4i32" - )] - fn _vmaxvq_u32(a: uint32x4_t) -> u32; - } - unsafe { _vmaxvq_u32(a) } + unsafe { simd_reduce_max(a) } } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f64)"] @@ -13677,14 +13433,7 @@ pub fn vminh_f16(a: f16, b: f16) -> f16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminnm))] pub fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnm.v1f64" - )] - fn _vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; - } - unsafe { _vminnm_f64(a, b) } + unsafe { simd_fmin(a, b) } } #[doc = "Floating-point Minimum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f64)"] @@ -13693,14 +13442,7 @@ pub fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminnm))] pub fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnm.v2f64" - )] - fn _vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - } - unsafe { _vminnmq_f64(a, b) } + unsafe { simd_fmin(a, b) } } #[doc = "Floating-point Minimum Number"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmh_f16)"] @@ -13709,14 +13451,7 @@ pub fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[unstable(feature = "stdarch_neon_f16", issue = 
"136306")] #[cfg_attr(test, assert_instr(fminnm))] pub fn vminnmh_f16(a: f16, b: f16) -> f16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnm.f16" - )] - fn _vminnmh_f16(a: f16, b: f16) -> f16; - } - unsafe { _vminnmh_f16(a, b) } + f16::min(a, b) } #[doc = "Floating-point minimum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f16)"] @@ -13725,14 +13460,7 @@ pub fn vminnmh_f16(a: f16, b: f16) -> f16 { #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg_attr(test, assert_instr(fminnmv))] pub fn vminnmv_f16(a: float16x4_t) -> f16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnmv.f16.v4f16" - )] - fn _vminnmv_f16(a: float16x4_t) -> f16; - } - unsafe { _vminnmv_f16(a) } + unsafe { simd_reduce_min(a) } } #[doc = "Floating-point minimum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f16)"] @@ -13741,14 +13469,7 @@ pub fn vminnmv_f16(a: float16x4_t) -> f16 { #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg_attr(test, assert_instr(fminnmv))] pub fn vminnmvq_f16(a: float16x8_t) -> f16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnmv.f16.v8f16" - )] - fn _vminnmvq_f16(a: float16x8_t) -> f16; - } - unsafe { _vminnmvq_f16(a) } + unsafe { simd_reduce_min(a) } } #[doc = "Floating-point minimum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f32)"] @@ -13757,14 +13478,7 @@ pub fn vminnmvq_f16(a: float16x8_t) -> f16 { #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub fn vminnmv_f32(a: float32x2_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32" - )] - fn _vminnmv_f32(a: float32x2_t) -> f32; - } - unsafe { _vminnmv_f32(a) } + unsafe { simd_reduce_min(a) } } #[doc = "Floating-point minimum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f64)"] @@ -13773,14 +13487,7 @@ pub fn vminnmv_f32(a: float32x2_t) -> f32 { #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub fn vminnmvq_f64(a: float64x2_t) -> f64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64" - )] - fn _vminnmvq_f64(a: float64x2_t) -> f64; - } - unsafe { _vminnmvq_f64(a) } + unsafe { simd_reduce_min(a) } } #[doc = "Floating-point minimum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f32)"] @@ -13789,14 +13496,7 @@ pub fn vminnmvq_f64(a: float64x2_t) -> f64 { #[cfg_attr(test, assert_instr(fminnmv))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub fn vminnmvq_f32(a: float32x4_t) -> f32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnmv.f32.v4f32" - )] - fn _vminnmvq_f32(a: float32x4_t) -> f32; - } - unsafe { 
_vminnmvq_f32(a) } + unsafe { simd_reduce_min(a) } } #[doc = "Floating-point minimum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f16)"] @@ -13885,14 +13585,7 @@ pub fn vminvq_f64(a: float64x2_t) -> f64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminv))] pub fn vminv_s8(a: int8x8_t) -> i8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminv.i8.v8i8" - )] - fn _vminv_s8(a: int8x8_t) -> i8; - } - unsafe { _vminv_s8(a) } + unsafe { simd_reduce_min(a) } } #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s8)"] @@ -13901,14 +13594,7 @@ pub fn vminv_s8(a: int8x8_t) -> i8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminv))] pub fn vminvq_s8(a: int8x16_t) -> i8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminv.i8.v16i8" - )] - fn _vminvq_s8(a: int8x16_t) -> i8; - } - unsafe { _vminvq_s8(a) } + unsafe { simd_reduce_min(a) } } #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s16)"] @@ -13917,14 +13603,7 @@ pub fn vminvq_s8(a: int8x16_t) -> i8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminv))] pub fn vminv_s16(a: int16x4_t) -> i16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminv.i16.v4i16" - )] - fn _vminv_s16(a: int16x4_t) -> i16; - } - unsafe { _vminv_s16(a) } + unsafe { simd_reduce_min(a) } } #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s16)"] @@ -13933,14 +13612,7 @@ pub fn vminv_s16(a: int16x4_t) -> i16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminv))] pub fn vminvq_s16(a: int16x8_t) -> i16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminv.i16.v8i16" - )] - fn _vminvq_s16(a: int16x8_t) -> i16; - } - unsafe { _vminvq_s16(a) } + unsafe { simd_reduce_min(a) } } #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s32)"] @@ -13949,14 +13621,7 @@ pub fn vminvq_s16(a: int16x8_t) -> i16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminp))] pub fn vminv_s32(a: int32x2_t) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminv.i32.v2i32" - )] - fn _vminv_s32(a: int32x2_t) -> i32; - } - unsafe { _vminv_s32(a) } + unsafe { simd_reduce_min(a) } } #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s32)"] @@ -13965,14 +13630,7 @@ pub fn vminv_s32(a: int32x2_t) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminv))] pub fn vminvq_s32(a: int32x4_t) -> i32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - 
link_name = "llvm.aarch64.neon.sminv.i32.v4i32" - )] - fn _vminvq_s32(a: int32x4_t) -> i32; - } - unsafe { _vminvq_s32(a) } + unsafe { simd_reduce_min(a) } } #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u8)"] @@ -13981,14 +13639,7 @@ pub fn vminvq_s32(a: int32x4_t) -> i32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminv))] pub fn vminv_u8(a: uint8x8_t) -> u8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminv.i8.v8i8" - )] - fn _vminv_u8(a: uint8x8_t) -> u8; - } - unsafe { _vminv_u8(a) } + unsafe { simd_reduce_min(a) } } #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u8)"] @@ -13997,14 +13648,7 @@ pub fn vminv_u8(a: uint8x8_t) -> u8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminv))] pub fn vminvq_u8(a: uint8x16_t) -> u8 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminv.i8.v16i8" - )] - fn _vminvq_u8(a: uint8x16_t) -> u8; - } - unsafe { _vminvq_u8(a) } + unsafe { simd_reduce_min(a) } } #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u16)"] @@ -14013,14 +13657,7 @@ pub fn vminvq_u8(a: uint8x16_t) -> u8 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminv))] pub fn vminv_u16(a: uint16x4_t) -> u16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminv.i16.v4i16" - )] - fn _vminv_u16(a: uint16x4_t) -> u16; - } - unsafe { _vminv_u16(a) } + unsafe { simd_reduce_min(a) } } #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u16)"] @@ -14029,14 +13666,7 @@ pub fn vminv_u16(a: uint16x4_t) -> u16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminv))] pub fn vminvq_u16(a: uint16x8_t) -> u16 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminv.i16.v8i16" - )] - fn _vminvq_u16(a: uint16x8_t) -> u16; - } - unsafe { _vminvq_u16(a) } + unsafe { simd_reduce_min(a) } } #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u32)"] @@ -14045,14 +13675,7 @@ pub fn vminvq_u16(a: uint16x8_t) -> u16 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminp))] pub fn vminv_u32(a: uint32x2_t) -> u32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminv.i32.v2i32" - )] - fn _vminv_u32(a: uint32x2_t) -> u32; - } - unsafe { _vminv_u32(a) } + unsafe { simd_reduce_min(a) } } #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u32)"] @@ -14061,14 +13684,7 @@ pub fn vminv_u32(a: uint32x2_t) -> u32 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminv))] pub fn vminvq_u32(a: uint32x4_t) -> u32 { - 
unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminv.i32.v4i32" - )] - fn _vminvq_u32(a: uint32x4_t) -> u32; - } - unsafe { _vminvq_u32(a) } + unsafe { simd_reduce_min(a) } } #[doc = "Floating-point multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f64)"] @@ -15277,7 +14893,7 @@ pub fn vmull_high_n_u32(a: uint32x4_t, b: u32) -> uint64x2_t { #[inline] #[target_feature(enable = "neon,aes")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(pmull))] +#[cfg_attr(test, assert_instr(pmull2))] pub fn vmull_high_p64(a: poly64x2_t, b: poly64x2_t) -> p128 { unsafe { vmull_p64(simd_extract!(a, 1), simd_extract!(b, 1)) } } @@ -15286,7 +14902,7 @@ pub fn vmull_high_p64(a: poly64x2_t, b: poly64x2_t) -> p128 { #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(pmull))] +#[cfg_attr(test, assert_instr(pmull2))] pub fn vmull_high_p8(a: poly8x16_t, b: poly8x16_t) -> poly16x8_t { unsafe { let a: poly8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); @@ -15951,23 +15567,11 @@ pub fn vpadds_f32(a: float32x2_t) -> f32 { #[doc = "Add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_s64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] pub fn vpaddd_s64(a: int64x2_t) -> i64 { - unsafe { transmute(vaddvq_u64(transmute(a))) } -} -#[doc = "Add pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_s64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(addp))] -pub fn vpaddd_s64(a: int64x2_t) -> i64 { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { transmute(vaddvq_u64(transmute(a))) } + unsafe { simd_reduce_add_unordered(a) } } #[doc = "Add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_u64)"] @@ -15976,7 +15580,7 @@ pub fn vpaddd_s64(a: int64x2_t) -> i64 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] pub fn vpaddd_u64(a: uint64x2_t) -> u64 { - vaddvq_u64(a) + unsafe { simd_reduce_add_unordered(a) } } #[doc = "Floating-point add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f16)"] @@ -26893,7 +26497,7 @@ pub fn vsubh_f16(a: f16, b: f16) -> f16 { #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ssubl))] +#[cfg_attr(test, assert_instr(ssubl2))] pub fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { unsafe { let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); @@ -26908,7 +26512,7 @@ pub fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ssubl))] +#[cfg_attr(test, assert_instr(ssubl2))] pub fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { unsafe { let c: int16x4_t = simd_shuffle!(a, a, 
[4, 5, 6, 7]); @@ -26923,7 +26527,7 @@ pub fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ssubl))] +#[cfg_attr(test, assert_instr(ssubl2))] pub fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { unsafe { let c: int32x2_t = simd_shuffle!(a, a, [2, 3]); @@ -26938,7 +26542,7 @@ pub fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usubl))] +#[cfg_attr(test, assert_instr(usubl2))] pub fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { unsafe { let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); @@ -26953,7 +26557,7 @@ pub fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usubl))] +#[cfg_attr(test, assert_instr(usubl2))] pub fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { unsafe { let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); @@ -26968,7 +26572,7 @@ pub fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usubl))] +#[cfg_attr(test, assert_instr(usubl2))] pub fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { unsafe { let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]); @@ -26983,7 +26587,7 @@ pub fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ssubw))] +#[cfg_attr(test, assert_instr(ssubw2))] pub fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { unsafe { let c: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); @@ -26995,7 +26599,7 @@ pub fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ssubw))] +#[cfg_attr(test, assert_instr(ssubw2))] pub fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { unsafe { let c: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); @@ -27007,7 +26611,7 @@ pub fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ssubw))] +#[cfg_attr(test, assert_instr(ssubw2))] pub fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { unsafe { let c: int32x2_t = simd_shuffle!(b, b, [2, 3]); @@ -27019,7 +26623,7 @@ pub fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usubw))] +#[cfg_attr(test, assert_instr(usubw2))] pub fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { unsafe { let c: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); @@ -27031,7 +26635,7 @@ pub fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usubw))] +#[cfg_attr(test, assert_instr(usubw2))] pub fn 
vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { unsafe { let c: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); @@ -27043,7 +26647,7 @@ pub fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(usubw))] +#[cfg_attr(test, assert_instr(usubw2))] pub fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { unsafe { let c: uint32x2_t = simd_shuffle!(b, b, [2, 3]); diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs index 4df1b741485b9..32531c7da1356 100644 --- a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs +++ b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs @@ -1483,15 +1483,11 @@ pub fn vabsq_f32(a: float32x4_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vabs_s8(a: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.abs.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v8i8")] - fn _vabs_s8(a: int8x8_t) -> int8x8_t; + unsafe { + let neg: int8x8_t = simd_neg(a); + let mask: int8x8_t = simd_ge(a, neg); + simd_select(mask, a, neg) } - unsafe { _vabs_s8(a) } } #[doc = "Absolute value (wrapping)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s8)"] @@ -1512,15 +1508,11 @@ pub fn vabs_s8(a: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vabsq_s8(a: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.abs.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v16i8")] - fn _vabsq_s8(a: int8x16_t) -> int8x16_t; + unsafe { + let neg: int8x16_t = simd_neg(a); + let mask: int8x16_t = simd_ge(a, neg); + simd_select(mask, a, neg) } - unsafe { _vabsq_s8(a) } } #[doc = "Absolute value (wrapping)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s16)"] @@ -1541,15 +1533,11 @@ pub fn vabsq_s8(a: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vabs_s16(a: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.abs.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v4i16")] - fn _vabs_s16(a: int16x4_t) -> int16x4_t; + unsafe { + let neg: int16x4_t = simd_neg(a); + let mask: int16x4_t = simd_ge(a, neg); + simd_select(mask, a, neg) } - unsafe { _vabs_s16(a) } } #[doc = "Absolute value (wrapping)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s16)"] @@ -1570,15 +1558,11 @@ pub fn vabs_s16(a: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vabsq_s16(a: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.abs.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v8i16")] - fn _vabsq_s16(a: int16x8_t) -> int16x8_t; 
+ unsafe { + let neg: int16x8_t = simd_neg(a); + let mask: int16x8_t = simd_ge(a, neg); + simd_select(mask, a, neg) } - unsafe { _vabsq_s16(a) } } #[doc = "Absolute value (wrapping)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s32)"] @@ -1599,15 +1583,11 @@ pub fn vabsq_s16(a: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vabs_s32(a: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.abs.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v2i32")] - fn _vabs_s32(a: int32x2_t) -> int32x2_t; + unsafe { + let neg: int32x2_t = simd_neg(a); + let mask: int32x2_t = simd_ge(a, neg); + simd_select(mask, a, neg) } - unsafe { _vabs_s32(a) } } #[doc = "Absolute value (wrapping)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s32)"] @@ -1628,15 +1608,11 @@ pub fn vabs_s32(a: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vabsq_s32(a: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.abs.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabs.v4i32")] - fn _vabsq_s32(a: int32x4_t) -> int32x4_t; + unsafe { + let neg: int32x4_t = simd_neg(a); + let mask: int32x4_t = simd_ge(a, neg); + simd_select(mask, a, neg) } - unsafe { _vabsq_s32(a) } } #[doc = "Floating-point absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsh_f16)"] @@ -14322,8 +14298,7 @@ pub unsafe fn vld1q_dup_f16(ptr: *const f16) -> float16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_dup_f32(ptr: *const f32) -> float32x2_t { - let x = vld1_lane_f32::<0>(ptr, transmute(f32x2::splat(0.0))); - simd_shuffle!(x, x, [0, 0]) + transmute(f32x2::splat(*ptr)) } #[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_p16)"] @@ -14346,8 +14321,7 @@ pub unsafe fn vld1_dup_f32(ptr: *const f32) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_dup_p16(ptr: *const p16) -> poly16x4_t { - let x = vld1_lane_p16::<0>(ptr, transmute(u16x4::splat(0))); - simd_shuffle!(x, x, [0, 0, 0, 0]) + transmute(u16x4::splat(*ptr)) } #[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_p8)"] @@ -14370,8 +14344,7 @@ pub unsafe fn vld1_dup_p16(ptr: *const p16) -> poly16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_dup_p8(ptr: *const p8) -> poly8x8_t { - let x = vld1_lane_p8::<0>(ptr, transmute(u8x8::splat(0))); - simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) + transmute(u8x8::splat(*ptr)) } #[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_s16)"] @@ -14394,8 +14367,7 @@ pub unsafe fn vld1_dup_p8(ptr: *const p8) -> poly8x8_t { 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_dup_s16(ptr: *const i16) -> int16x4_t { - let x = vld1_lane_s16::<0>(ptr, transmute(i16x4::splat(0))); - simd_shuffle!(x, x, [0, 0, 0, 0]) + transmute(i16x4::splat(*ptr)) } #[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_s32)"] @@ -14418,8 +14390,7 @@ pub unsafe fn vld1_dup_s16(ptr: *const i16) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_dup_s32(ptr: *const i32) -> int32x2_t { - let x = vld1_lane_s32::<0>(ptr, transmute(i32x2::splat(0))); - simd_shuffle!(x, x, [0, 0]) + transmute(i32x2::splat(*ptr)) } #[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_s8)"] @@ -14442,8 +14413,7 @@ pub unsafe fn vld1_dup_s32(ptr: *const i32) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_dup_s8(ptr: *const i8) -> int8x8_t { - let x = vld1_lane_s8::<0>(ptr, transmute(i8x8::splat(0))); - simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) + transmute(i8x8::splat(*ptr)) } #[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_u16)"] @@ -14466,8 +14436,7 @@ pub unsafe fn vld1_dup_s8(ptr: *const i8) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_dup_u16(ptr: *const u16) -> uint16x4_t { - let x = vld1_lane_u16::<0>(ptr, transmute(u16x4::splat(0))); - simd_shuffle!(x, x, [0, 0, 0, 0]) + transmute(u16x4::splat(*ptr)) } #[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_u32)"] @@ -14490,8 +14459,7 @@ pub unsafe fn vld1_dup_u16(ptr: *const u16) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_dup_u32(ptr: *const u32) -> uint32x2_t { - let x = vld1_lane_u32::<0>(ptr, transmute(u32x2::splat(0))); - simd_shuffle!(x, x, [0, 0]) + transmute(u32x2::splat(*ptr)) } #[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_u8)"] @@ -14514,8 +14482,7 @@ pub unsafe fn vld1_dup_u32(ptr: *const u32) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1_dup_u8(ptr: *const u8) -> uint8x8_t { - let x = vld1_lane_u8::<0>(ptr, transmute(u8x8::splat(0))); - simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) + transmute(u8x8::splat(*ptr)) } #[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_f32)"] @@ -14538,8 +14505,7 @@ pub unsafe fn vld1_dup_u8(ptr: *const u8) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_dup_f32(ptr: *const f32) -> float32x4_t { - let x = vld1q_lane_f32::<0>(ptr, transmute(f32x4::splat(0.0))); - simd_shuffle!(x, x, [0, 0, 0, 0]) + 
transmute(f32x4::splat(*ptr)) } #[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_p16)"] @@ -14562,8 +14528,7 @@ pub unsafe fn vld1q_dup_f32(ptr: *const f32) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_dup_p16(ptr: *const p16) -> poly16x8_t { - let x = vld1q_lane_p16::<0>(ptr, transmute(u16x8::splat(0))); - simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) + transmute(u16x8::splat(*ptr)) } #[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_p8)"] @@ -14586,8 +14551,7 @@ pub unsafe fn vld1q_dup_p16(ptr: *const p16) -> poly16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_dup_p8(ptr: *const p8) -> poly8x16_t { - let x = vld1q_lane_p8::<0>(ptr, transmute(u8x16::splat(0))); - simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) + transmute(u8x16::splat(*ptr)) } #[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_s16)"] @@ -14610,8 +14574,7 @@ pub unsafe fn vld1q_dup_p8(ptr: *const p8) -> poly8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_dup_s16(ptr: *const i16) -> int16x8_t { - let x = vld1q_lane_s16::<0>(ptr, transmute(i16x8::splat(0))); - simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) + transmute(i16x8::splat(*ptr)) } #[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_s32)"] @@ -14634,8 +14597,7 @@ pub unsafe fn vld1q_dup_s16(ptr: *const i16) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_dup_s32(ptr: *const i32) -> int32x4_t { - let x = vld1q_lane_s32::<0>(ptr, transmute(i32x4::splat(0))); - simd_shuffle!(x, x, [0, 0, 0, 0]) + transmute(i32x4::splat(*ptr)) } #[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_s64)"] @@ -14647,7 +14609,7 @@ pub unsafe fn vld1q_dup_s32(ptr: *const i32) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(ld1r) )] #[cfg_attr( not(target_arch = "arm"), @@ -14658,8 +14620,7 @@ pub unsafe fn vld1q_dup_s32(ptr: *const i32) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_dup_s64(ptr: *const i64) -> int64x2_t { - let x = vld1q_lane_s64::<0>(ptr, transmute(i64x2::splat(0))); - simd_shuffle!(x, x, [0, 0]) + transmute(i64x2::splat(*ptr)) } #[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_s8)"] @@ -14682,8 +14643,7 @@ pub unsafe fn vld1q_dup_s64(ptr: *const i64) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn 
vld1q_dup_s8(ptr: *const i8) -> int8x16_t { - let x = vld1q_lane_s8::<0>(ptr, transmute(i8x16::splat(0))); - simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) + transmute(i8x16::splat(*ptr)) } #[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_u16)"] @@ -14706,8 +14666,7 @@ pub unsafe fn vld1q_dup_s8(ptr: *const i8) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_dup_u16(ptr: *const u16) -> uint16x8_t { - let x = vld1q_lane_u16::<0>(ptr, transmute(u16x8::splat(0))); - simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) + transmute(u16x8::splat(*ptr)) } #[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_u32)"] @@ -14730,8 +14689,7 @@ pub unsafe fn vld1q_dup_u16(ptr: *const u16) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_dup_u32(ptr: *const u32) -> uint32x4_t { - let x = vld1q_lane_u32::<0>(ptr, transmute(u32x4::splat(0))); - simd_shuffle!(x, x, [0, 0, 0, 0]) + transmute(u32x4::splat(*ptr)) } #[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_u64)"] @@ -14743,7 +14701,7 @@ pub unsafe fn vld1q_dup_u32(ptr: *const u32) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1) + assert_instr(ld1r) )] #[cfg_attr( not(target_arch = "arm"), @@ -14754,8 +14712,7 @@ pub unsafe fn vld1q_dup_u32(ptr: *const u32) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_dup_u64(ptr: *const u64) -> uint64x2_t { - let x = vld1q_lane_u64::<0>(ptr, transmute(u64x2::splat(0))); - simd_shuffle!(x, x, [0, 0]) + transmute(u64x2::splat(*ptr)) } #[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_u8)"] @@ -14778,8 +14735,7 @@ pub unsafe fn vld1q_dup_u64(ptr: *const u64) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub unsafe fn vld1q_dup_u8(ptr: *const u8) -> uint8x16_t { - let x = vld1q_lane_u8::<0>(ptr, transmute(u8x16::splat(0))); - simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) + transmute(u8x16::splat(*ptr)) } #[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_p64)"] @@ -27681,15 +27637,10 @@ pub fn vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smax.v8i8" - )] - fn _vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + unsafe { + let mask: int8x8_t = simd_ge(a, b); + 
simd_select(mask, a, b) } - unsafe { _vmax_s8(a, b) } } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s8)"] @@ -27710,15 +27661,10 @@ pub fn vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smax.v16i8" - )] - fn _vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + unsafe { + let mask: int8x16_t = simd_ge(a, b); + simd_select(mask, a, b) } - unsafe { _vmaxq_s8(a, b) } } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s16)"] @@ -27739,15 +27685,10 @@ pub fn vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smax.v4i16" - )] - fn _vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + unsafe { + let mask: int16x4_t = simd_ge(a, b); + simd_select(mask, a, b) } - unsafe { _vmax_s16(a, b) } } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s16)"] @@ -27768,15 +27709,10 @@ pub fn vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smax.v8i16" - )] - fn _vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + unsafe { + let mask: int16x8_t = simd_ge(a, b); + simd_select(mask, a, b) } - unsafe { _vmaxq_s16(a, b) } } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s32)"] @@ -27797,15 +27733,10 @@ pub fn vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smax.v2i32" - )] - fn _vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + unsafe { + let mask: int32x2_t = simd_ge(a, b); + simd_select(mask, a, b) } - unsafe { _vmax_s32(a, b) } } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s32)"] @@ -27826,15 +27757,10 @@ pub fn vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4i32")] - #[cfg_attr( - any(target_arch = 
"aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smax.v4i32" - )] - fn _vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + unsafe { + let mask: int32x4_t = simd_ge(a, b); + simd_select(mask, a, b) } - unsafe { _vmaxq_s32(a, b) } } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u8)"] @@ -27855,15 +27781,10 @@ pub fn vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umax.v8i8" - )] - fn _vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; + unsafe { + let mask: uint8x8_t = simd_ge(a, b); + simd_select(mask, a, b) } - unsafe { _vmax_u8(a, b) } } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u8)"] @@ -27884,15 +27805,10 @@ pub fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umax.v16i8" - )] - fn _vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; + unsafe { + let mask: uint8x16_t = simd_ge(a, b); + simd_select(mask, a, b) } - unsafe { _vmaxq_u8(a, b) } } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u16)"] @@ -27913,15 +27829,10 @@ pub fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umax.v4i16" - )] - fn _vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; + unsafe { + let mask: uint16x4_t = simd_ge(a, b); + simd_select(mask, a, b) } - unsafe { _vmax_u16(a, b) } } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u16)"] @@ -27942,15 +27853,10 @@ pub fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umax.v8i16" - )] - fn _vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; + unsafe { + let mask: uint16x8_t = simd_ge(a, b); + simd_select(mask, a, b) } - unsafe { _vmaxq_u16(a, b) } } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u32)"] @@ -27971,15 +27877,10 @@ pub fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", 
issue = "111800") )] pub fn vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umax.v2i32" - )] - fn _vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; + unsafe { + let mask: uint32x2_t = simd_ge(a, b); + simd_select(mask, a, b) } - unsafe { _vmax_u32(a, b) } } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u32)"] @@ -28000,15 +27901,10 @@ pub fn vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umax.v4i32" - )] - fn _vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; + unsafe { + let mask: uint32x4_t = simd_ge(a, b); + simd_select(mask, a, b) } - unsafe { _vmaxq_u32(a, b) } } #[doc = "Floating-point Maximum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f16)"] @@ -28022,15 +27918,7 @@ pub fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] pub fn vmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v4f16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnm.v4f16" - )] - fn _vmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t; - } - unsafe { _vmaxnm_f16(a, b) } + unsafe { simd_fmax(a, b) } } #[doc = "Floating-point Maximum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f16)"] @@ -28044,15 +27932,7 @@ pub fn vmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] pub fn vmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v8f16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnm.v8f16" - )] - fn _vmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t; - } - unsafe { _vmaxnmq_f16(a, b) } + unsafe { simd_fmax(a, b) } } #[doc = "Floating-point Maximum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f32)"] @@ -28073,15 +27953,7 @@ pub fn vmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnm.v2f32" - )] - fn _vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - unsafe { _vmaxnm_f32(a, b) } + unsafe { 
simd_fmax(a, b) } } #[doc = "Floating-point Maximum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f32)"] @@ -28102,15 +27974,7 @@ pub fn vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnm.v4f32" - )] - fn _vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - unsafe { _vmaxnmq_f32(a, b) } + unsafe { simd_fmax(a, b) } } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f16)"] @@ -28233,15 +28097,10 @@ pub fn vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smin.v8i8" - )] - fn _vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + unsafe { + let mask: int8x8_t = simd_le(a, b); + simd_select(mask, a, b) } - unsafe { _vmin_s8(a, b) } } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s8)"] @@ -28262,15 +28121,10 @@ pub fn vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smin.v16i8" - )] - fn _vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + unsafe { + let mask: int8x16_t = simd_le(a, b); + simd_select(mask, a, b) } - unsafe { _vminq_s8(a, b) } } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s16)"] @@ -28291,15 +28145,10 @@ pub fn vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smin.v4i16" - )] - fn _vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + unsafe { + let mask: int16x4_t = simd_le(a, b); + simd_select(mask, a, b) } - unsafe { _vmin_s16(a, b) } } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s16)"] @@ -28320,15 +28169,10 @@ pub fn vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - 
link_name = "llvm.aarch64.neon.smin.v8i16" - )] - fn _vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + unsafe { + let mask: int16x8_t = simd_le(a, b); + simd_select(mask, a, b) } - unsafe { _vminq_s16(a, b) } } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s32)"] @@ -28349,15 +28193,10 @@ pub fn vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smin.v2i32" - )] - fn _vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + unsafe { + let mask: int32x2_t = simd_le(a, b); + simd_select(mask, a, b) } - unsafe { _vmin_s32(a, b) } } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s32)"] @@ -28378,15 +28217,10 @@ pub fn vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smin.v4i32" - )] - fn _vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + unsafe { + let mask: int32x4_t = simd_le(a, b); + simd_select(mask, a, b) } - unsafe { _vminq_s32(a, b) } } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u8)"] @@ -28407,15 +28241,10 @@ pub fn vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umin.v8i8" - )] - fn _vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; + unsafe { + let mask: uint8x8_t = simd_le(a, b); + simd_select(mask, a, b) } - unsafe { _vmin_u8(a, b) } } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u8)"] @@ -28436,15 +28265,10 @@ pub fn vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umin.v16i8" - )] - fn _vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; + unsafe { + let mask: uint8x16_t = simd_le(a, b); + simd_select(mask, a, b) } - unsafe { _vminq_u8(a, b) } } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u16)"] @@ -28465,15 +28289,10 @@ pub fn vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vmin_u16(a: uint16x4_t, b: 
uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umin.v4i16" - )] - fn _vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; + unsafe { + let mask: uint16x4_t = simd_le(a, b); + simd_select(mask, a, b) } - unsafe { _vmin_u16(a, b) } } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u16)"] @@ -28494,15 +28313,10 @@ pub fn vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umin.v8i16" - )] - fn _vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; + unsafe { + let mask: uint16x8_t = simd_le(a, b); + simd_select(mask, a, b) } - unsafe { _vminq_u16(a, b) } } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u32)"] @@ -28523,15 +28337,10 @@ pub fn vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umin.v2i32" - )] - fn _vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; + unsafe { + let mask: uint32x2_t = simd_le(a, b); + simd_select(mask, a, b) } - unsafe { _vmin_u32(a, b) } } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u32)"] @@ -28552,15 +28361,10 @@ pub fn vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umin.v4i32" - )] - fn _vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; + unsafe { + let mask: uint32x4_t = simd_le(a, b); + simd_select(mask, a, b) } - unsafe { _vminq_u32(a, b) } } #[doc = "Floating-point Minimum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f16)"] @@ -28574,15 +28378,7 @@ pub fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] pub fn vminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v4f16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnm.v4f16" - )] - fn _vminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t; - } - unsafe { _vminnm_f16(a, b) } + unsafe { simd_fmin(a, b) } } #[doc = "Floating-point Minimum Number (vector)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f16)"] @@ -28596,15 +28392,7 @@ pub fn vminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] pub fn vminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v8f16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnm.v8f16" - )] - fn _vminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t; - } - unsafe { _vminnmq_f16(a, b) } + unsafe { simd_fmin(a, b) } } #[doc = "Floating-point Minimum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f32)"] @@ -28625,15 +28413,7 @@ pub fn vminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnm.v2f32" - )] - fn _vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - unsafe { _vminnm_f32(a, b) } + unsafe { simd_fmin(a, b) } } #[doc = "Floating-point Minimum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f32)"] @@ -28654,15 +28434,7 @@ pub fn vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnm.v4f32" - )] - fn _vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - unsafe { _vminnmq_f32(a, b) } + unsafe { simd_fmin(a, b) } } #[doc = "Floating-point multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f32)"] @@ -39566,17 +39338,7 @@ pub fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v8i8")] fn _vqrshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; } - unsafe { - _vqrshrn_n_s16( - a, - const { - int16x8_t([ - -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, - -N as i16, - ]) - }, - ) - } + unsafe { _vqrshrn_n_s16(a, const { int16x8_t([-N as i16; 8]) }) } } #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s32)"] @@ -39592,12 +39354,7 @@ pub fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v4i16")] fn _vqrshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; } - unsafe { - _vqrshrn_n_s32( - a, - const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, - ) - } + unsafe { _vqrshrn_n_s32(a, const { int32x4_t([-N; 4]) }) } } #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s64)"] @@ -39613,7 +39370,7 @@ pub fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v2i32")] fn _vqrshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; } - unsafe { _vqrshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }) } + unsafe { _vqrshrn_n_s64(a, const { int64x2_t([-N as i64; 2]) }) } } #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s16)"] @@ -39806,17 +39563,7 @@ pub fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v8i8")] fn _vqrshrun_n_s16(a: int16x8_t, n: int16x8_t) -> uint8x8_t; } - unsafe { - _vqrshrun_n_s16( - a, - const { - int16x8_t([ - -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, - -N as i16, - ]) - }, - ) - } + unsafe { _vqrshrun_n_s16(a, const { int16x8_t([-N as i16; 8]) }) } } #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)"] @@ -39832,12 +39579,7 @@ pub fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v4i16")] fn _vqrshrun_n_s32(a: int32x4_t, n: int32x4_t) -> uint16x4_t; } - unsafe { - _vqrshrun_n_s32( - a, - const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, - ) - } + unsafe { _vqrshrun_n_s32(a, const { int32x4_t([-N; 4]) }) } } #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)"] @@ -39853,7 +39595,7 @@ pub fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v2i32")] fn _vqrshrun_n_s64(a: int64x2_t, n: int64x2_t) -> uint32x2_t; } - unsafe { _vqrshrun_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }) } + unsafe { _vqrshrun_n_s64(a, const { int64x2_t([-N as i64; 2]) }) } } #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)"] @@ -41038,17 +40780,7 @@ pub fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v8i8")] fn _vqshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; } - unsafe { - _vqshrn_n_s16( - a, - const { - int16x8_t([ - -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, - -N as i16, - ]) - }, - ) - } + unsafe { _vqshrn_n_s16(a, const { int16x8_t([-N as i16; 8]) }) } } #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)"] @@ -41064,12 +40796,7 @@ pub fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v4i16")] fn _vqshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; } - unsafe { - _vqshrn_n_s32( - a, - const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, - ) - } + unsafe { _vqshrn_n_s32(a, const { int32x4_t([-N; 4]) }) } } #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)"] @@ -41085,7 +40812,7 @@ pub fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v2i32")] fn _vqshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; } - unsafe { _vqshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }) } + unsafe { _vqshrn_n_s64(a, const { int64x2_t([-N as i64; 2]) }) } } #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)"] @@ -41278,17 +41005,7 @@ pub fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v8i8")] fn _vqshrun_n_s16(a: int16x8_t, n: int16x8_t) -> uint8x8_t; } - unsafe { - _vqshrun_n_s16( - a, - const { - int16x8_t([ - -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, - -N as i16, - ]) - }, - ) - } + unsafe { _vqshrun_n_s16(a, const { int16x8_t([-N as i16; 8]) }) } } #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)"] @@ -41304,12 +41021,7 @@ pub fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v4i16")] fn _vqshrun_n_s32(a: int32x4_t, n: int32x4_t) -> uint16x4_t; } - unsafe { - _vqshrun_n_s32( - a, - const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, - ) - } + unsafe { _vqshrun_n_s32(a, const { int32x4_t([-N; 4]) }) } } #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)"] @@ -41325,7 +41037,7 @@ pub fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v2i32")] fn _vqshrun_n_s64(a: int64x2_t, n: int64x2_t) -> uint32x2_t; } - unsafe { _vqshrun_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }) } + unsafe { _vqshrun_n_s64(a, const { int64x2_t([-N as i64; 2]) }) } } #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)"] @@ -59483,17 +59195,7 @@ pub fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v8i8")] fn _vrshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; } - unsafe { - _vrshrn_n_s16( - a, - const { - int16x8_t([ - -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, - -N as i16, - ]) - }, - ) - } + unsafe { _vrshrn_n_s16(a, const { int16x8_t([-N as i16; 8]) }) } } #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s32)"] @@ -59509,12 +59211,7 @@ pub fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v4i16")] fn _vrshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; } - unsafe { - _vrshrn_n_s32( - a, - const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, - ) - } + unsafe { _vrshrn_n_s32(a, const { int32x4_t([-N; 4]) }) } } #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s64)"] @@ -59530,7 +59227,7 @@ pub fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { 
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v2i32")] fn _vrshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; } - unsafe { _vrshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }) } + unsafe { _vrshrn_n_s64(a, const { int64x2_t([-N as i64; 2]) }) } } #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s16)"] @@ -63183,7 +62880,7 @@ pub fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { transmute(vshiftins_v2i32( transmute(a), transmute(b), - int32x2_t::splat(N as i32), + int32x2_t::splat(N), )) } } @@ -63201,7 +62898,7 @@ pub fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { transmute(vshiftins_v4i32( transmute(a), transmute(b), - int32x4_t::splat(N as i32), + int32x4_t::splat(N), )) } } @@ -63739,7 +63436,7 @@ pub fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[rustc_legacy_const_generics(2)] pub fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { static_assert!(1 <= N && N <= 32); - vshiftins_v2i32(a, b, int32x2_t::splat(-N as i32)) + vshiftins_v2i32(a, b, int32x2_t::splat(-N)) } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"] @@ -63751,7 +63448,7 @@ pub fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[rustc_legacy_const_generics(2)] pub fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { static_assert!(1 <= N && N <= 32); - vshiftins_v4i32(a, b, int32x4_t::splat(-N as i32)) + vshiftins_v4i32(a, b, int32x4_t::splat(-N)) } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s64)"] @@ -73297,7 +72994,11 @@ pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -73315,7 +73016,11 @@ pub fn vtrn_f16(a: float16x4_t, b: float16x4_t) -> float16x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -73334,7 +73039,11 @@ pub fn vtrnq_f16(a: float16x8_t, b: float16x8_t) -> float16x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(zip1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -73359,7 +73068,11 @@ pub fn vtrn_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(zip1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), 
+ assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -73384,7 +73097,11 @@ pub fn vtrn_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(zip1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -73409,7 +73126,11 @@ pub fn vtrn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -73434,7 +73155,11 @@ pub fn vtrnq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -73459,7 +73184,11 @@ pub fn vtrn_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -73492,7 +73221,11 @@ pub fn vtrnq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -73517,7 +73250,11 @@ pub fn vtrn_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -73542,7 +73279,11 @@ pub fn vtrnq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -73567,7 +73308,11 @@ pub fn vtrnq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -73592,7 +73337,11 @@ pub fn vtrn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] 
#[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -73625,7 +73374,11 @@ pub fn vtrnq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -73650,7 +73403,11 @@ pub fn vtrn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -73675,7 +73432,11 @@ pub fn vtrnq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -73700,7 +73461,11 @@ pub fn vtrnq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -73725,7 +73490,11 @@ pub fn vtrn_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -73758,7 +73527,11 @@ pub fn vtrnq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -73783,7 +73556,11 @@ pub fn vtrn_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -74349,7 +74126,11 @@ pub fn vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = 
"arm64ec")), + assert_instr(uzp2) )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -74367,7 +74148,11 @@ pub fn vuzp_f16(a: float16x4_t, b: float16x4_t) -> float16x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -74386,7 +74171,11 @@ pub fn vuzpq_f16(a: float16x8_t, b: float16x8_t) -> float16x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(zip1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -74411,7 +74200,11 @@ pub fn vuzp_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(zip1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -74436,7 +74229,11 @@ pub fn vuzp_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(zip1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -74461,7 +74258,11 @@ pub fn vuzp_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -74486,7 +74287,11 @@ pub fn vuzpq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -74511,7 +74316,11 @@ pub fn vuzp_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -74544,7 +74353,11 @@ pub fn vuzpq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ 
-74569,7 +74382,11 @@ pub fn vuzp_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -74594,7 +74411,11 @@ pub fn vuzpq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -74619,7 +74440,11 @@ pub fn vuzpq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -74644,7 +74469,11 @@ pub fn vuzp_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -74677,7 +74506,11 @@ pub fn vuzpq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -74702,7 +74535,11 @@ pub fn vuzp_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -74727,7 +74564,11 @@ pub fn vuzpq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -74752,7 +74593,11 @@ pub fn vuzpq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -74777,7 +74622,11 @@ pub fn vuzp_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = 
"arm64ec")), - assert_instr(uzp) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -74810,7 +74659,11 @@ pub fn vuzpq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -74835,7 +74688,11 @@ pub fn vuzp_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -74859,7 +74716,11 @@ pub fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vzip.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(zip1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip2) )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -74877,7 +74738,11 @@ pub fn vzip_f16(a: float16x4_t, b: float16x4_t) -> float16x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vzip.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(zip1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip2) )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -74896,7 +74761,11 @@ pub fn vzipq_f16(a: float16x8_t, b: float16x8_t) -> float16x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(zip1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -74921,7 +74790,11 @@ pub fn vzip_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(zip1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -74946,7 +74819,11 @@ pub fn vzip_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(zip1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -74971,7 +74848,11 @@ pub fn vzip_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(zip1) +)] 
+#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -74996,7 +74877,11 @@ pub fn vzip_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(zip1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -75021,7 +74906,11 @@ pub fn vzip_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(zip1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -75046,7 +74935,11 @@ pub fn vzip_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(zip1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -75071,7 +74964,11 @@ pub fn vzip_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(zip1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -75096,7 +74993,11 @@ pub fn vzip_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(zip1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -75121,7 +75022,11 @@ pub fn vzip_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(zip1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -75146,7 +75051,11 @@ pub fn vzipq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(zip1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -75179,7 +75088,11 @@ pub fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(zip1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -75204,7 +75117,11 @@ pub fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> 
int16x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(zip1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -75229,7 +75146,11 @@ pub fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(zip1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -75262,7 +75183,11 @@ pub fn vzipq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(zip1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -75287,7 +75212,11 @@ pub fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(zip1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -75312,7 +75241,11 @@ pub fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(zip1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -75345,7 +75278,11 @@ pub fn vzipq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip) + assert_instr(zip1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), diff --git a/library/stdarch/crates/core_arch/src/core_arch_docs.md b/library/stdarch/crates/core_arch/src/core_arch_docs.md index bfa1b7228860b..6aea2b4618465 100644 --- a/library/stdarch/crates/core_arch/src/core_arch_docs.md +++ b/library/stdarch/crates/core_arch/src/core_arch_docs.md @@ -193,6 +193,7 @@ others at: * [`powerpc64`] * [`nvptx`] * [`wasm32`] +* [`loongarch32`] * [`loongarch64`] * [`s390x`] @@ -208,6 +209,7 @@ others at: [`powerpc64`]: ../../core/arch/powerpc64/index.html [`nvptx`]: ../../core/arch/nvptx/index.html [`wasm32`]: ../../core/arch/wasm32/index.html +[`loongarch32`]: ../../core/arch/loongarch32/index.html [`loongarch64`]: ../../core/arch/loongarch64/index.html [`s390x`]: ../../core/arch/s390x/index.html diff --git a/library/stdarch/crates/core_arch/src/loongarch32/mod.rs b/library/stdarch/crates/core_arch/src/loongarch32/mod.rs new file mode 100644 index 0000000000000..fb05450373c28 --- /dev/null +++ b/library/stdarch/crates/core_arch/src/loongarch32/mod.rs @@ -0,0 +1,47 @@ +//! 
`LoongArch32` intrinsics + +use crate::arch::asm; + +#[allow(improper_ctypes)] +unsafe extern "unadjusted" { + #[link_name = "llvm.loongarch.cacop.w"] + fn __cacop(a: i32, b: i32, c: i32); + #[link_name = "llvm.loongarch.csrrd.w"] + fn __csrrd(a: i32) -> i32; + #[link_name = "llvm.loongarch.csrwr.w"] + fn __csrwr(a: i32, b: i32) -> i32; + #[link_name = "llvm.loongarch.csrxchg.w"] + fn __csrxchg(a: i32, b: i32, c: i32) -> i32; +} + +/// Generates the cache operation instruction +#[inline] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn cacop(a: i32, b: i32) { + static_assert_simm_bits!(IMM12, 12); + __cacop(a, b, IMM12); +} + +/// Reads the CSR +#[inline] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn csrrd() -> i32 { + static_assert_uimm_bits!(IMM14, 14); + __csrrd(IMM14) +} + +/// Writes the CSR +#[inline] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn csrwr(a: i32) -> i32 { + static_assert_uimm_bits!(IMM14, 14); + __csrwr(a, IMM14) +} + +/// Exchanges the CSR +#[inline] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn csrxchg(a: i32, b: i32) -> i32 { + static_assert_uimm_bits!(IMM14, 14); + __csrxchg(a, b, IMM14) +} diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs b/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs index 2e56d8fb9b83d..4361acdc1fcc0 100644 --- a/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs +++ b/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs @@ -1495,3501 +1495,3501 @@ unsafe extern "unadjusted" { #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsll_b(a: v32i8, b: v32i8) -> v32i8 { - __lasx_xvsll_b(a, b) +pub fn lasx_xvsll_b(a: v32i8, b: v32i8) -> v32i8 { + unsafe { __lasx_xvsll_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsll_h(a: v16i16, b: v16i16) -> v16i16 { - __lasx_xvsll_h(a, b) +pub fn lasx_xvsll_h(a: v16i16, b: v16i16) -> v16i16 { + unsafe { __lasx_xvsll_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsll_w(a: v8i32, b: v8i32) -> v8i32 { - __lasx_xvsll_w(a, b) +pub fn lasx_xvsll_w(a: v8i32, b: v8i32) -> v8i32 { + unsafe { __lasx_xvsll_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsll_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvsll_d(a, b) +pub fn lasx_xvsll_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvsll_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvslli_b(a: v32i8) -> v32i8 { +pub fn lasx_xvslli_b(a: v32i8) -> v32i8 { static_assert_uimm_bits!(IMM3, 3); - __lasx_xvslli_b(a, IMM3) + unsafe { __lasx_xvslli_b(a, IMM3) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvslli_h(a: v16i16) -> v16i16 { +pub fn lasx_xvslli_h(a: v16i16) -> v16i16 { static_assert_uimm_bits!(IMM4, 4); - __lasx_xvslli_h(a, IMM4) + unsafe { __lasx_xvslli_h(a, IMM4) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", 
issue = "117427")] -pub unsafe fn lasx_xvslli_w(a: v8i32) -> v8i32 { +pub fn lasx_xvslli_w(a: v8i32) -> v8i32 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvslli_w(a, IMM5) + unsafe { __lasx_xvslli_w(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvslli_d(a: v4i64) -> v4i64 { +pub fn lasx_xvslli_d(a: v4i64) -> v4i64 { static_assert_uimm_bits!(IMM6, 6); - __lasx_xvslli_d(a, IMM6) + unsafe { __lasx_xvslli_d(a, IMM6) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsra_b(a: v32i8, b: v32i8) -> v32i8 { - __lasx_xvsra_b(a, b) +pub fn lasx_xvsra_b(a: v32i8, b: v32i8) -> v32i8 { + unsafe { __lasx_xvsra_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsra_h(a: v16i16, b: v16i16) -> v16i16 { - __lasx_xvsra_h(a, b) +pub fn lasx_xvsra_h(a: v16i16, b: v16i16) -> v16i16 { + unsafe { __lasx_xvsra_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsra_w(a: v8i32, b: v8i32) -> v8i32 { - __lasx_xvsra_w(a, b) +pub fn lasx_xvsra_w(a: v8i32, b: v8i32) -> v8i32 { + unsafe { __lasx_xvsra_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsra_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvsra_d(a, b) +pub fn lasx_xvsra_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvsra_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrai_b(a: v32i8) -> v32i8 { +pub fn lasx_xvsrai_b(a: v32i8) -> v32i8 { static_assert_uimm_bits!(IMM3, 3); - __lasx_xvsrai_b(a, IMM3) + unsafe { __lasx_xvsrai_b(a, IMM3) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrai_h(a: v16i16) -> v16i16 { +pub fn lasx_xvsrai_h(a: v16i16) -> v16i16 { static_assert_uimm_bits!(IMM4, 4); - __lasx_xvsrai_h(a, IMM4) + unsafe { __lasx_xvsrai_h(a, IMM4) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrai_w(a: v8i32) -> v8i32 { +pub fn lasx_xvsrai_w(a: v8i32) -> v8i32 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvsrai_w(a, IMM5) + unsafe { __lasx_xvsrai_w(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrai_d(a: v4i64) -> v4i64 { +pub fn lasx_xvsrai_d(a: v4i64) -> v4i64 { static_assert_uimm_bits!(IMM6, 6); - __lasx_xvsrai_d(a, IMM6) + unsafe { __lasx_xvsrai_d(a, IMM6) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrar_b(a: v32i8, b: v32i8) -> v32i8 { - __lasx_xvsrar_b(a, b) +pub fn lasx_xvsrar_b(a: v32i8, b: v32i8) -> v32i8 { + unsafe { __lasx_xvsrar_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrar_h(a: v16i16, b: v16i16) -> v16i16 { - __lasx_xvsrar_h(a, b) +pub fn lasx_xvsrar_h(a: v16i16, b: v16i16) -> v16i16 { + unsafe { 
__lasx_xvsrar_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrar_w(a: v8i32, b: v8i32) -> v8i32 { - __lasx_xvsrar_w(a, b) +pub fn lasx_xvsrar_w(a: v8i32, b: v8i32) -> v8i32 { + unsafe { __lasx_xvsrar_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrar_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvsrar_d(a, b) +pub fn lasx_xvsrar_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvsrar_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrari_b(a: v32i8) -> v32i8 { +pub fn lasx_xvsrari_b(a: v32i8) -> v32i8 { static_assert_uimm_bits!(IMM3, 3); - __lasx_xvsrari_b(a, IMM3) + unsafe { __lasx_xvsrari_b(a, IMM3) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrari_h(a: v16i16) -> v16i16 { +pub fn lasx_xvsrari_h(a: v16i16) -> v16i16 { static_assert_uimm_bits!(IMM4, 4); - __lasx_xvsrari_h(a, IMM4) + unsafe { __lasx_xvsrari_h(a, IMM4) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrari_w(a: v8i32) -> v8i32 { +pub fn lasx_xvsrari_w(a: v8i32) -> v8i32 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvsrari_w(a, IMM5) + unsafe { __lasx_xvsrari_w(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrari_d(a: v4i64) -> v4i64 { +pub fn lasx_xvsrari_d(a: v4i64) -> v4i64 { static_assert_uimm_bits!(IMM6, 6); - __lasx_xvsrari_d(a, IMM6) + unsafe { __lasx_xvsrari_d(a, IMM6) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrl_b(a: v32i8, b: v32i8) -> v32i8 { - __lasx_xvsrl_b(a, b) +pub fn lasx_xvsrl_b(a: v32i8, b: v32i8) -> v32i8 { + unsafe { __lasx_xvsrl_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrl_h(a: v16i16, b: v16i16) -> v16i16 { - __lasx_xvsrl_h(a, b) +pub fn lasx_xvsrl_h(a: v16i16, b: v16i16) -> v16i16 { + unsafe { __lasx_xvsrl_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrl_w(a: v8i32, b: v8i32) -> v8i32 { - __lasx_xvsrl_w(a, b) +pub fn lasx_xvsrl_w(a: v8i32, b: v8i32) -> v8i32 { + unsafe { __lasx_xvsrl_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrl_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvsrl_d(a, b) +pub fn lasx_xvsrl_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvsrl_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrli_b(a: v32i8) -> v32i8 { +pub fn lasx_xvsrli_b(a: v32i8) -> v32i8 { static_assert_uimm_bits!(IMM3, 3); - __lasx_xvsrli_b(a, IMM3) + unsafe { __lasx_xvsrli_b(a, IMM3) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn 
lasx_xvsrli_h(a: v16i16) -> v16i16 { +pub fn lasx_xvsrli_h(a: v16i16) -> v16i16 { static_assert_uimm_bits!(IMM4, 4); - __lasx_xvsrli_h(a, IMM4) + unsafe { __lasx_xvsrli_h(a, IMM4) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrli_w(a: v8i32) -> v8i32 { +pub fn lasx_xvsrli_w(a: v8i32) -> v8i32 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvsrli_w(a, IMM5) + unsafe { __lasx_xvsrli_w(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrli_d(a: v4i64) -> v4i64 { +pub fn lasx_xvsrli_d(a: v4i64) -> v4i64 { static_assert_uimm_bits!(IMM6, 6); - __lasx_xvsrli_d(a, IMM6) + unsafe { __lasx_xvsrli_d(a, IMM6) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrlr_b(a: v32i8, b: v32i8) -> v32i8 { - __lasx_xvsrlr_b(a, b) +pub fn lasx_xvsrlr_b(a: v32i8, b: v32i8) -> v32i8 { + unsafe { __lasx_xvsrlr_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrlr_h(a: v16i16, b: v16i16) -> v16i16 { - __lasx_xvsrlr_h(a, b) +pub fn lasx_xvsrlr_h(a: v16i16, b: v16i16) -> v16i16 { + unsafe { __lasx_xvsrlr_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrlr_w(a: v8i32, b: v8i32) -> v8i32 { - __lasx_xvsrlr_w(a, b) +pub fn lasx_xvsrlr_w(a: v8i32, b: v8i32) -> v8i32 { + unsafe { __lasx_xvsrlr_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrlr_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvsrlr_d(a, b) +pub fn lasx_xvsrlr_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvsrlr_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrlri_b(a: v32i8) -> v32i8 { +pub fn lasx_xvsrlri_b(a: v32i8) -> v32i8 { static_assert_uimm_bits!(IMM3, 3); - __lasx_xvsrlri_b(a, IMM3) + unsafe { __lasx_xvsrlri_b(a, IMM3) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrlri_h(a: v16i16) -> v16i16 { +pub fn lasx_xvsrlri_h(a: v16i16) -> v16i16 { static_assert_uimm_bits!(IMM4, 4); - __lasx_xvsrlri_h(a, IMM4) + unsafe { __lasx_xvsrlri_h(a, IMM4) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrlri_w(a: v8i32) -> v8i32 { +pub fn lasx_xvsrlri_w(a: v8i32) -> v8i32 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvsrlri_w(a, IMM5) + unsafe { __lasx_xvsrlri_w(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrlri_d(a: v4i64) -> v4i64 { +pub fn lasx_xvsrlri_d(a: v4i64) -> v4i64 { static_assert_uimm_bits!(IMM6, 6); - __lasx_xvsrlri_d(a, IMM6) + unsafe { __lasx_xvsrlri_d(a, IMM6) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvbitclr_b(a: v32u8, b: v32u8) -> v32u8 { - __lasx_xvbitclr_b(a, b) +pub fn 
lasx_xvbitclr_b(a: v32u8, b: v32u8) -> v32u8 { + unsafe { __lasx_xvbitclr_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvbitclr_h(a: v16u16, b: v16u16) -> v16u16 { - __lasx_xvbitclr_h(a, b) +pub fn lasx_xvbitclr_h(a: v16u16, b: v16u16) -> v16u16 { + unsafe { __lasx_xvbitclr_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvbitclr_w(a: v8u32, b: v8u32) -> v8u32 { - __lasx_xvbitclr_w(a, b) +pub fn lasx_xvbitclr_w(a: v8u32, b: v8u32) -> v8u32 { + unsafe { __lasx_xvbitclr_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvbitclr_d(a: v4u64, b: v4u64) -> v4u64 { - __lasx_xvbitclr_d(a, b) +pub fn lasx_xvbitclr_d(a: v4u64, b: v4u64) -> v4u64 { + unsafe { __lasx_xvbitclr_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvbitclri_b(a: v32u8) -> v32u8 { +pub fn lasx_xvbitclri_b(a: v32u8) -> v32u8 { static_assert_uimm_bits!(IMM3, 3); - __lasx_xvbitclri_b(a, IMM3) + unsafe { __lasx_xvbitclri_b(a, IMM3) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvbitclri_h(a: v16u16) -> v16u16 { +pub fn lasx_xvbitclri_h(a: v16u16) -> v16u16 { static_assert_uimm_bits!(IMM4, 4); - __lasx_xvbitclri_h(a, IMM4) + unsafe { __lasx_xvbitclri_h(a, IMM4) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvbitclri_w(a: v8u32) -> v8u32 { +pub fn lasx_xvbitclri_w(a: v8u32) -> v8u32 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvbitclri_w(a, IMM5) + unsafe { __lasx_xvbitclri_w(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvbitclri_d(a: v4u64) -> v4u64 { +pub fn lasx_xvbitclri_d(a: v4u64) -> v4u64 { static_assert_uimm_bits!(IMM6, 6); - __lasx_xvbitclri_d(a, IMM6) + unsafe { __lasx_xvbitclri_d(a, IMM6) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvbitset_b(a: v32u8, b: v32u8) -> v32u8 { - __lasx_xvbitset_b(a, b) +pub fn lasx_xvbitset_b(a: v32u8, b: v32u8) -> v32u8 { + unsafe { __lasx_xvbitset_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvbitset_h(a: v16u16, b: v16u16) -> v16u16 { - __lasx_xvbitset_h(a, b) +pub fn lasx_xvbitset_h(a: v16u16, b: v16u16) -> v16u16 { + unsafe { __lasx_xvbitset_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvbitset_w(a: v8u32, b: v8u32) -> v8u32 { - __lasx_xvbitset_w(a, b) +pub fn lasx_xvbitset_w(a: v8u32, b: v8u32) -> v8u32 { + unsafe { __lasx_xvbitset_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvbitset_d(a: v4u64, b: v4u64) -> v4u64 { - __lasx_xvbitset_d(a, b) +pub fn lasx_xvbitset_d(a: v4u64, b: v4u64) -> v4u64 { + unsafe { __lasx_xvbitset_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] 
#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvbitseti_b(a: v32u8) -> v32u8 { +pub fn lasx_xvbitseti_b(a: v32u8) -> v32u8 { static_assert_uimm_bits!(IMM3, 3); - __lasx_xvbitseti_b(a, IMM3) + unsafe { __lasx_xvbitseti_b(a, IMM3) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvbitseti_h(a: v16u16) -> v16u16 { +pub fn lasx_xvbitseti_h(a: v16u16) -> v16u16 { static_assert_uimm_bits!(IMM4, 4); - __lasx_xvbitseti_h(a, IMM4) + unsafe { __lasx_xvbitseti_h(a, IMM4) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvbitseti_w(a: v8u32) -> v8u32 { +pub fn lasx_xvbitseti_w(a: v8u32) -> v8u32 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvbitseti_w(a, IMM5) + unsafe { __lasx_xvbitseti_w(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvbitseti_d(a: v4u64) -> v4u64 { +pub fn lasx_xvbitseti_d(a: v4u64) -> v4u64 { static_assert_uimm_bits!(IMM6, 6); - __lasx_xvbitseti_d(a, IMM6) + unsafe { __lasx_xvbitseti_d(a, IMM6) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvbitrev_b(a: v32u8, b: v32u8) -> v32u8 { - __lasx_xvbitrev_b(a, b) +pub fn lasx_xvbitrev_b(a: v32u8, b: v32u8) -> v32u8 { + unsafe { __lasx_xvbitrev_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvbitrev_h(a: v16u16, b: v16u16) -> v16u16 { - __lasx_xvbitrev_h(a, b) +pub fn lasx_xvbitrev_h(a: v16u16, b: v16u16) -> v16u16 { + unsafe { __lasx_xvbitrev_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvbitrev_w(a: v8u32, b: v8u32) -> v8u32 { - __lasx_xvbitrev_w(a, b) +pub fn lasx_xvbitrev_w(a: v8u32, b: v8u32) -> v8u32 { + unsafe { __lasx_xvbitrev_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvbitrev_d(a: v4u64, b: v4u64) -> v4u64 { - __lasx_xvbitrev_d(a, b) +pub fn lasx_xvbitrev_d(a: v4u64, b: v4u64) -> v4u64 { + unsafe { __lasx_xvbitrev_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvbitrevi_b(a: v32u8) -> v32u8 { +pub fn lasx_xvbitrevi_b(a: v32u8) -> v32u8 { static_assert_uimm_bits!(IMM3, 3); - __lasx_xvbitrevi_b(a, IMM3) + unsafe { __lasx_xvbitrevi_b(a, IMM3) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvbitrevi_h(a: v16u16) -> v16u16 { +pub fn lasx_xvbitrevi_h(a: v16u16) -> v16u16 { static_assert_uimm_bits!(IMM4, 4); - __lasx_xvbitrevi_h(a, IMM4) + unsafe { __lasx_xvbitrevi_h(a, IMM4) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvbitrevi_w(a: v8u32) -> v8u32 { +pub fn lasx_xvbitrevi_w(a: v8u32) -> v8u32 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvbitrevi_w(a, IMM5) + unsafe { __lasx_xvbitrevi_w(a, IMM5) } } 
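Note: the change repeated for every LASX intrinsic in this hunk moves the `unsafe` keyword from the public signature into the function body, so only the call to the raw LLVM builtin is marked unsafe while the wrapper itself becomes callable from safe code. Below is a minimal, self-contained sketch of that pattern; `raw_builtin` and `safe_wrapper` are hypothetical stand-ins, not names from the patch, and the arithmetic body merely substitutes for the real builtin.

    // Sketch of the safe-wrapper pattern applied throughout this hunk.
    // `raw_builtin` and `safe_wrapper` are hypothetical names.

    /// Stand-in for a raw builtin such as `__lasx_xvsll_b`.
    unsafe fn raw_builtin(a: i32, b: i32) -> i32 {
        a.wrapping_add(b)
    }

    /// Previously this wrapper would have been `pub unsafe fn`; after the
    /// change it is a safe function whose unsafety is contained at the
    /// builtin call site.
    #[inline]
    pub fn safe_wrapper(a: i32, b: i32) -> i32 {
        // SAFETY: `raw_builtin` has no preconditions in this sketch. In the
        // generated intrinsics the remaining precondition is the `lasx`
        // target feature, which the `#[target_feature(enable = "lasx")]`
        // attribute kept on each wrapper continues to carry.
        unsafe { raw_builtin(a, b) }
    }

    fn main() {
        assert_eq!(safe_wrapper(2, 3), 5);
    }

In the generated code the wrappers keep their `#[target_feature(enable = "lasx")]` attribute, so this presumably relies on support for safe `#[target_feature]` functions; callers no longer need an `unsafe` block for these pure register-to-register operations.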
#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvbitrevi_d(a: v4u64) -> v4u64 { +pub fn lasx_xvbitrevi_d(a: v4u64) -> v4u64 { static_assert_uimm_bits!(IMM6, 6); - __lasx_xvbitrevi_d(a, IMM6) + unsafe { __lasx_xvbitrevi_d(a, IMM6) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvadd_b(a: v32i8, b: v32i8) -> v32i8 { - __lasx_xvadd_b(a, b) +pub fn lasx_xvadd_b(a: v32i8, b: v32i8) -> v32i8 { + unsafe { __lasx_xvadd_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvadd_h(a: v16i16, b: v16i16) -> v16i16 { - __lasx_xvadd_h(a, b) +pub fn lasx_xvadd_h(a: v16i16, b: v16i16) -> v16i16 { + unsafe { __lasx_xvadd_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvadd_w(a: v8i32, b: v8i32) -> v8i32 { - __lasx_xvadd_w(a, b) +pub fn lasx_xvadd_w(a: v8i32, b: v8i32) -> v8i32 { + unsafe { __lasx_xvadd_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvadd_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvadd_d(a, b) +pub fn lasx_xvadd_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvadd_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvaddi_bu(a: v32i8) -> v32i8 { +pub fn lasx_xvaddi_bu(a: v32i8) -> v32i8 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvaddi_bu(a, IMM5) + unsafe { __lasx_xvaddi_bu(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvaddi_hu(a: v16i16) -> v16i16 { +pub fn lasx_xvaddi_hu(a: v16i16) -> v16i16 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvaddi_hu(a, IMM5) + unsafe { __lasx_xvaddi_hu(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvaddi_wu(a: v8i32) -> v8i32 { +pub fn lasx_xvaddi_wu(a: v8i32) -> v8i32 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvaddi_wu(a, IMM5) + unsafe { __lasx_xvaddi_wu(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvaddi_du(a: v4i64) -> v4i64 { +pub fn lasx_xvaddi_du(a: v4i64) -> v4i64 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvaddi_du(a, IMM5) + unsafe { __lasx_xvaddi_du(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsub_b(a: v32i8, b: v32i8) -> v32i8 { - __lasx_xvsub_b(a, b) +pub fn lasx_xvsub_b(a: v32i8, b: v32i8) -> v32i8 { + unsafe { __lasx_xvsub_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsub_h(a: v16i16, b: v16i16) -> v16i16 { - __lasx_xvsub_h(a, b) +pub fn lasx_xvsub_h(a: v16i16, b: v16i16) -> v16i16 { + unsafe { __lasx_xvsub_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsub_w(a: v8i32, b: v8i32) -> v8i32 { - 
__lasx_xvsub_w(a, b) +pub fn lasx_xvsub_w(a: v8i32, b: v8i32) -> v8i32 { + unsafe { __lasx_xvsub_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsub_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvsub_d(a, b) +pub fn lasx_xvsub_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvsub_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsubi_bu(a: v32i8) -> v32i8 { +pub fn lasx_xvsubi_bu(a: v32i8) -> v32i8 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvsubi_bu(a, IMM5) + unsafe { __lasx_xvsubi_bu(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsubi_hu(a: v16i16) -> v16i16 { +pub fn lasx_xvsubi_hu(a: v16i16) -> v16i16 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvsubi_hu(a, IMM5) + unsafe { __lasx_xvsubi_hu(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsubi_wu(a: v8i32) -> v8i32 { +pub fn lasx_xvsubi_wu(a: v8i32) -> v8i32 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvsubi_wu(a, IMM5) + unsafe { __lasx_xvsubi_wu(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsubi_du(a: v4i64) -> v4i64 { +pub fn lasx_xvsubi_du(a: v4i64) -> v4i64 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvsubi_du(a, IMM5) + unsafe { __lasx_xvsubi_du(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmax_b(a: v32i8, b: v32i8) -> v32i8 { - __lasx_xvmax_b(a, b) +pub fn lasx_xvmax_b(a: v32i8, b: v32i8) -> v32i8 { + unsafe { __lasx_xvmax_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmax_h(a: v16i16, b: v16i16) -> v16i16 { - __lasx_xvmax_h(a, b) +pub fn lasx_xvmax_h(a: v16i16, b: v16i16) -> v16i16 { + unsafe { __lasx_xvmax_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmax_w(a: v8i32, b: v8i32) -> v8i32 { - __lasx_xvmax_w(a, b) +pub fn lasx_xvmax_w(a: v8i32, b: v8i32) -> v8i32 { + unsafe { __lasx_xvmax_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmax_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvmax_d(a, b) +pub fn lasx_xvmax_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvmax_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaxi_b(a: v32i8) -> v32i8 { +pub fn lasx_xvmaxi_b(a: v32i8) -> v32i8 { static_assert_simm_bits!(IMM_S5, 5); - __lasx_xvmaxi_b(a, IMM_S5) + unsafe { __lasx_xvmaxi_b(a, IMM_S5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaxi_h(a: v16i16) -> v16i16 { +pub fn lasx_xvmaxi_h(a: v16i16) -> v16i16 { static_assert_simm_bits!(IMM_S5, 5); - __lasx_xvmaxi_h(a, IMM_S5) + unsafe { __lasx_xvmaxi_h(a, IMM_S5) } } #[inline] 
#[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaxi_w(a: v8i32) -> v8i32 { +pub fn lasx_xvmaxi_w(a: v8i32) -> v8i32 { static_assert_simm_bits!(IMM_S5, 5); - __lasx_xvmaxi_w(a, IMM_S5) + unsafe { __lasx_xvmaxi_w(a, IMM_S5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaxi_d(a: v4i64) -> v4i64 { +pub fn lasx_xvmaxi_d(a: v4i64) -> v4i64 { static_assert_simm_bits!(IMM_S5, 5); - __lasx_xvmaxi_d(a, IMM_S5) + unsafe { __lasx_xvmaxi_d(a, IMM_S5) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmax_bu(a: v32u8, b: v32u8) -> v32u8 { - __lasx_xvmax_bu(a, b) +pub fn lasx_xvmax_bu(a: v32u8, b: v32u8) -> v32u8 { + unsafe { __lasx_xvmax_bu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmax_hu(a: v16u16, b: v16u16) -> v16u16 { - __lasx_xvmax_hu(a, b) +pub fn lasx_xvmax_hu(a: v16u16, b: v16u16) -> v16u16 { + unsafe { __lasx_xvmax_hu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmax_wu(a: v8u32, b: v8u32) -> v8u32 { - __lasx_xvmax_wu(a, b) +pub fn lasx_xvmax_wu(a: v8u32, b: v8u32) -> v8u32 { + unsafe { __lasx_xvmax_wu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmax_du(a: v4u64, b: v4u64) -> v4u64 { - __lasx_xvmax_du(a, b) +pub fn lasx_xvmax_du(a: v4u64, b: v4u64) -> v4u64 { + unsafe { __lasx_xvmax_du(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaxi_bu(a: v32u8) -> v32u8 { +pub fn lasx_xvmaxi_bu(a: v32u8) -> v32u8 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvmaxi_bu(a, IMM5) + unsafe { __lasx_xvmaxi_bu(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaxi_hu(a: v16u16) -> v16u16 { +pub fn lasx_xvmaxi_hu(a: v16u16) -> v16u16 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvmaxi_hu(a, IMM5) + unsafe { __lasx_xvmaxi_hu(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaxi_wu(a: v8u32) -> v8u32 { +pub fn lasx_xvmaxi_wu(a: v8u32) -> v8u32 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvmaxi_wu(a, IMM5) + unsafe { __lasx_xvmaxi_wu(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaxi_du(a: v4u64) -> v4u64 { +pub fn lasx_xvmaxi_du(a: v4u64) -> v4u64 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvmaxi_du(a, IMM5) + unsafe { __lasx_xvmaxi_du(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmin_b(a: v32i8, b: v32i8) -> v32i8 { - __lasx_xvmin_b(a, b) +pub fn lasx_xvmin_b(a: v32i8, b: v32i8) -> v32i8 { + unsafe { __lasx_xvmin_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn 
lasx_xvmin_h(a: v16i16, b: v16i16) -> v16i16 { - __lasx_xvmin_h(a, b) +pub fn lasx_xvmin_h(a: v16i16, b: v16i16) -> v16i16 { + unsafe { __lasx_xvmin_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmin_w(a: v8i32, b: v8i32) -> v8i32 { - __lasx_xvmin_w(a, b) +pub fn lasx_xvmin_w(a: v8i32, b: v8i32) -> v8i32 { + unsafe { __lasx_xvmin_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmin_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvmin_d(a, b) +pub fn lasx_xvmin_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvmin_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmini_b(a: v32i8) -> v32i8 { +pub fn lasx_xvmini_b(a: v32i8) -> v32i8 { static_assert_simm_bits!(IMM_S5, 5); - __lasx_xvmini_b(a, IMM_S5) + unsafe { __lasx_xvmini_b(a, IMM_S5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmini_h(a: v16i16) -> v16i16 { +pub fn lasx_xvmini_h(a: v16i16) -> v16i16 { static_assert_simm_bits!(IMM_S5, 5); - __lasx_xvmini_h(a, IMM_S5) + unsafe { __lasx_xvmini_h(a, IMM_S5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmini_w(a: v8i32) -> v8i32 { +pub fn lasx_xvmini_w(a: v8i32) -> v8i32 { static_assert_simm_bits!(IMM_S5, 5); - __lasx_xvmini_w(a, IMM_S5) + unsafe { __lasx_xvmini_w(a, IMM_S5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmini_d(a: v4i64) -> v4i64 { +pub fn lasx_xvmini_d(a: v4i64) -> v4i64 { static_assert_simm_bits!(IMM_S5, 5); - __lasx_xvmini_d(a, IMM_S5) + unsafe { __lasx_xvmini_d(a, IMM_S5) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmin_bu(a: v32u8, b: v32u8) -> v32u8 { - __lasx_xvmin_bu(a, b) +pub fn lasx_xvmin_bu(a: v32u8, b: v32u8) -> v32u8 { + unsafe { __lasx_xvmin_bu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmin_hu(a: v16u16, b: v16u16) -> v16u16 { - __lasx_xvmin_hu(a, b) +pub fn lasx_xvmin_hu(a: v16u16, b: v16u16) -> v16u16 { + unsafe { __lasx_xvmin_hu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmin_wu(a: v8u32, b: v8u32) -> v8u32 { - __lasx_xvmin_wu(a, b) +pub fn lasx_xvmin_wu(a: v8u32, b: v8u32) -> v8u32 { + unsafe { __lasx_xvmin_wu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmin_du(a: v4u64, b: v4u64) -> v4u64 { - __lasx_xvmin_du(a, b) +pub fn lasx_xvmin_du(a: v4u64, b: v4u64) -> v4u64 { + unsafe { __lasx_xvmin_du(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmini_bu(a: v32u8) -> v32u8 { +pub fn lasx_xvmini_bu(a: v32u8) -> v32u8 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvmini_bu(a, IMM5) + unsafe { __lasx_xvmini_bu(a, IMM5) } } 
#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmini_hu(a: v16u16) -> v16u16 { +pub fn lasx_xvmini_hu(a: v16u16) -> v16u16 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvmini_hu(a, IMM5) + unsafe { __lasx_xvmini_hu(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmini_wu(a: v8u32) -> v8u32 { +pub fn lasx_xvmini_wu(a: v8u32) -> v8u32 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvmini_wu(a, IMM5) + unsafe { __lasx_xvmini_wu(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmini_du(a: v4u64) -> v4u64 { +pub fn lasx_xvmini_du(a: v4u64) -> v4u64 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvmini_du(a, IMM5) + unsafe { __lasx_xvmini_du(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvseq_b(a: v32i8, b: v32i8) -> v32i8 { - __lasx_xvseq_b(a, b) +pub fn lasx_xvseq_b(a: v32i8, b: v32i8) -> v32i8 { + unsafe { __lasx_xvseq_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvseq_h(a: v16i16, b: v16i16) -> v16i16 { - __lasx_xvseq_h(a, b) +pub fn lasx_xvseq_h(a: v16i16, b: v16i16) -> v16i16 { + unsafe { __lasx_xvseq_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvseq_w(a: v8i32, b: v8i32) -> v8i32 { - __lasx_xvseq_w(a, b) +pub fn lasx_xvseq_w(a: v8i32, b: v8i32) -> v8i32 { + unsafe { __lasx_xvseq_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvseq_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvseq_d(a, b) +pub fn lasx_xvseq_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvseq_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvseqi_b(a: v32i8) -> v32i8 { +pub fn lasx_xvseqi_b(a: v32i8) -> v32i8 { static_assert_simm_bits!(IMM_S5, 5); - __lasx_xvseqi_b(a, IMM_S5) + unsafe { __lasx_xvseqi_b(a, IMM_S5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvseqi_h(a: v16i16) -> v16i16 { +pub fn lasx_xvseqi_h(a: v16i16) -> v16i16 { static_assert_simm_bits!(IMM_S5, 5); - __lasx_xvseqi_h(a, IMM_S5) + unsafe { __lasx_xvseqi_h(a, IMM_S5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvseqi_w(a: v8i32) -> v8i32 { +pub fn lasx_xvseqi_w(a: v8i32) -> v8i32 { static_assert_simm_bits!(IMM_S5, 5); - __lasx_xvseqi_w(a, IMM_S5) + unsafe { __lasx_xvseqi_w(a, IMM_S5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvseqi_d(a: v4i64) -> v4i64 { +pub fn lasx_xvseqi_d(a: v4i64) -> v4i64 { static_assert_simm_bits!(IMM_S5, 5); - __lasx_xvseqi_d(a, IMM_S5) + unsafe { __lasx_xvseqi_d(a, IMM_S5) } } #[inline] #[target_feature(enable = "lasx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvslt_b(a: v32i8, b: v32i8) -> v32i8 { - __lasx_xvslt_b(a, b) +pub fn lasx_xvslt_b(a: v32i8, b: v32i8) -> v32i8 { + unsafe { __lasx_xvslt_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvslt_h(a: v16i16, b: v16i16) -> v16i16 { - __lasx_xvslt_h(a, b) +pub fn lasx_xvslt_h(a: v16i16, b: v16i16) -> v16i16 { + unsafe { __lasx_xvslt_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvslt_w(a: v8i32, b: v8i32) -> v8i32 { - __lasx_xvslt_w(a, b) +pub fn lasx_xvslt_w(a: v8i32, b: v8i32) -> v8i32 { + unsafe { __lasx_xvslt_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvslt_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvslt_d(a, b) +pub fn lasx_xvslt_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvslt_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvslti_b(a: v32i8) -> v32i8 { +pub fn lasx_xvslti_b(a: v32i8) -> v32i8 { static_assert_simm_bits!(IMM_S5, 5); - __lasx_xvslti_b(a, IMM_S5) + unsafe { __lasx_xvslti_b(a, IMM_S5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvslti_h(a: v16i16) -> v16i16 { +pub fn lasx_xvslti_h(a: v16i16) -> v16i16 { static_assert_simm_bits!(IMM_S5, 5); - __lasx_xvslti_h(a, IMM_S5) + unsafe { __lasx_xvslti_h(a, IMM_S5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvslti_w(a: v8i32) -> v8i32 { +pub fn lasx_xvslti_w(a: v8i32) -> v8i32 { static_assert_simm_bits!(IMM_S5, 5); - __lasx_xvslti_w(a, IMM_S5) + unsafe { __lasx_xvslti_w(a, IMM_S5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvslti_d(a: v4i64) -> v4i64 { +pub fn lasx_xvslti_d(a: v4i64) -> v4i64 { static_assert_simm_bits!(IMM_S5, 5); - __lasx_xvslti_d(a, IMM_S5) + unsafe { __lasx_xvslti_d(a, IMM_S5) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvslt_bu(a: v32u8, b: v32u8) -> v32i8 { - __lasx_xvslt_bu(a, b) +pub fn lasx_xvslt_bu(a: v32u8, b: v32u8) -> v32i8 { + unsafe { __lasx_xvslt_bu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvslt_hu(a: v16u16, b: v16u16) -> v16i16 { - __lasx_xvslt_hu(a, b) +pub fn lasx_xvslt_hu(a: v16u16, b: v16u16) -> v16i16 { + unsafe { __lasx_xvslt_hu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvslt_wu(a: v8u32, b: v8u32) -> v8i32 { - __lasx_xvslt_wu(a, b) +pub fn lasx_xvslt_wu(a: v8u32, b: v8u32) -> v8i32 { + unsafe { __lasx_xvslt_wu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvslt_du(a: v4u64, b: v4u64) -> v4i64 { - __lasx_xvslt_du(a, b) +pub fn lasx_xvslt_du(a: v4u64, b: v4u64) -> v4i64 { + unsafe { __lasx_xvslt_du(a, b) 
} } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvslti_bu(a: v32u8) -> v32i8 { +pub fn lasx_xvslti_bu(a: v32u8) -> v32i8 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvslti_bu(a, IMM5) + unsafe { __lasx_xvslti_bu(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvslti_hu(a: v16u16) -> v16i16 { +pub fn lasx_xvslti_hu(a: v16u16) -> v16i16 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvslti_hu(a, IMM5) + unsafe { __lasx_xvslti_hu(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvslti_wu(a: v8u32) -> v8i32 { +pub fn lasx_xvslti_wu(a: v8u32) -> v8i32 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvslti_wu(a, IMM5) + unsafe { __lasx_xvslti_wu(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvslti_du(a: v4u64) -> v4i64 { +pub fn lasx_xvslti_du(a: v4u64) -> v4i64 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvslti_du(a, IMM5) + unsafe { __lasx_xvslti_du(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsle_b(a: v32i8, b: v32i8) -> v32i8 { - __lasx_xvsle_b(a, b) +pub fn lasx_xvsle_b(a: v32i8, b: v32i8) -> v32i8 { + unsafe { __lasx_xvsle_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsle_h(a: v16i16, b: v16i16) -> v16i16 { - __lasx_xvsle_h(a, b) +pub fn lasx_xvsle_h(a: v16i16, b: v16i16) -> v16i16 { + unsafe { __lasx_xvsle_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsle_w(a: v8i32, b: v8i32) -> v8i32 { - __lasx_xvsle_w(a, b) +pub fn lasx_xvsle_w(a: v8i32, b: v8i32) -> v8i32 { + unsafe { __lasx_xvsle_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsle_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvsle_d(a, b) +pub fn lasx_xvsle_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvsle_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvslei_b(a: v32i8) -> v32i8 { +pub fn lasx_xvslei_b(a: v32i8) -> v32i8 { static_assert_simm_bits!(IMM_S5, 5); - __lasx_xvslei_b(a, IMM_S5) + unsafe { __lasx_xvslei_b(a, IMM_S5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvslei_h(a: v16i16) -> v16i16 { +pub fn lasx_xvslei_h(a: v16i16) -> v16i16 { static_assert_simm_bits!(IMM_S5, 5); - __lasx_xvslei_h(a, IMM_S5) + unsafe { __lasx_xvslei_h(a, IMM_S5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvslei_w(a: v8i32) -> v8i32 { +pub fn lasx_xvslei_w(a: v8i32) -> v8i32 { static_assert_simm_bits!(IMM_S5, 5); - __lasx_xvslei_w(a, IMM_S5) + unsafe { __lasx_xvslei_w(a, IMM_S5) } } #[inline] #[target_feature(enable = "lasx")] 
#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvslei_d(a: v4i64) -> v4i64 { +pub fn lasx_xvslei_d(a: v4i64) -> v4i64 { static_assert_simm_bits!(IMM_S5, 5); - __lasx_xvslei_d(a, IMM_S5) + unsafe { __lasx_xvslei_d(a, IMM_S5) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsle_bu(a: v32u8, b: v32u8) -> v32i8 { - __lasx_xvsle_bu(a, b) +pub fn lasx_xvsle_bu(a: v32u8, b: v32u8) -> v32i8 { + unsafe { __lasx_xvsle_bu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsle_hu(a: v16u16, b: v16u16) -> v16i16 { - __lasx_xvsle_hu(a, b) +pub fn lasx_xvsle_hu(a: v16u16, b: v16u16) -> v16i16 { + unsafe { __lasx_xvsle_hu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsle_wu(a: v8u32, b: v8u32) -> v8i32 { - __lasx_xvsle_wu(a, b) +pub fn lasx_xvsle_wu(a: v8u32, b: v8u32) -> v8i32 { + unsafe { __lasx_xvsle_wu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsle_du(a: v4u64, b: v4u64) -> v4i64 { - __lasx_xvsle_du(a, b) +pub fn lasx_xvsle_du(a: v4u64, b: v4u64) -> v4i64 { + unsafe { __lasx_xvsle_du(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvslei_bu(a: v32u8) -> v32i8 { +pub fn lasx_xvslei_bu(a: v32u8) -> v32i8 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvslei_bu(a, IMM5) + unsafe { __lasx_xvslei_bu(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvslei_hu(a: v16u16) -> v16i16 { +pub fn lasx_xvslei_hu(a: v16u16) -> v16i16 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvslei_hu(a, IMM5) + unsafe { __lasx_xvslei_hu(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvslei_wu(a: v8u32) -> v8i32 { +pub fn lasx_xvslei_wu(a: v8u32) -> v8i32 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvslei_wu(a, IMM5) + unsafe { __lasx_xvslei_wu(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvslei_du(a: v4u64) -> v4i64 { +pub fn lasx_xvslei_du(a: v4u64) -> v4i64 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvslei_du(a, IMM5) + unsafe { __lasx_xvslei_du(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsat_b(a: v32i8) -> v32i8 { +pub fn lasx_xvsat_b(a: v32i8) -> v32i8 { static_assert_uimm_bits!(IMM3, 3); - __lasx_xvsat_b(a, IMM3) + unsafe { __lasx_xvsat_b(a, IMM3) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsat_h(a: v16i16) -> v16i16 { +pub fn lasx_xvsat_h(a: v16i16) -> v16i16 { static_assert_uimm_bits!(IMM4, 4); - __lasx_xvsat_h(a, IMM4) + unsafe { __lasx_xvsat_h(a, IMM4) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsat_w(a: v8i32) -> v8i32 { +pub fn lasx_xvsat_w(a: v8i32) -> v8i32 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvsat_w(a, IMM5) + unsafe { __lasx_xvsat_w(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsat_d(a: v4i64) -> v4i64 { +pub fn lasx_xvsat_d(a: v4i64) -> v4i64 { static_assert_uimm_bits!(IMM6, 6); - __lasx_xvsat_d(a, IMM6) + unsafe { __lasx_xvsat_d(a, IMM6) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsat_bu(a: v32u8) -> v32u8 { +pub fn lasx_xvsat_bu(a: v32u8) -> v32u8 { static_assert_uimm_bits!(IMM3, 3); - __lasx_xvsat_bu(a, IMM3) + unsafe { __lasx_xvsat_bu(a, IMM3) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsat_hu(a: v16u16) -> v16u16 { +pub fn lasx_xvsat_hu(a: v16u16) -> v16u16 { static_assert_uimm_bits!(IMM4, 4); - __lasx_xvsat_hu(a, IMM4) + unsafe { __lasx_xvsat_hu(a, IMM4) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsat_wu(a: v8u32) -> v8u32 { +pub fn lasx_xvsat_wu(a: v8u32) -> v8u32 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvsat_wu(a, IMM5) + unsafe { __lasx_xvsat_wu(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsat_du(a: v4u64) -> v4u64 { +pub fn lasx_xvsat_du(a: v4u64) -> v4u64 { static_assert_uimm_bits!(IMM6, 6); - __lasx_xvsat_du(a, IMM6) + unsafe { __lasx_xvsat_du(a, IMM6) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvadda_b(a: v32i8, b: v32i8) -> v32i8 { - __lasx_xvadda_b(a, b) +pub fn lasx_xvadda_b(a: v32i8, b: v32i8) -> v32i8 { + unsafe { __lasx_xvadda_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvadda_h(a: v16i16, b: v16i16) -> v16i16 { - __lasx_xvadda_h(a, b) +pub fn lasx_xvadda_h(a: v16i16, b: v16i16) -> v16i16 { + unsafe { __lasx_xvadda_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvadda_w(a: v8i32, b: v8i32) -> v8i32 { - __lasx_xvadda_w(a, b) +pub fn lasx_xvadda_w(a: v8i32, b: v8i32) -> v8i32 { + unsafe { __lasx_xvadda_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvadda_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvadda_d(a, b) +pub fn lasx_xvadda_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvadda_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsadd_b(a: v32i8, b: v32i8) -> v32i8 { - __lasx_xvsadd_b(a, b) +pub fn lasx_xvsadd_b(a: v32i8, b: v32i8) -> v32i8 { + unsafe { __lasx_xvsadd_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsadd_h(a: v16i16, b: v16i16) -> v16i16 { - __lasx_xvsadd_h(a, b) +pub fn lasx_xvsadd_h(a: v16i16, b: v16i16) -> 
v16i16 { + unsafe { __lasx_xvsadd_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsadd_w(a: v8i32, b: v8i32) -> v8i32 { - __lasx_xvsadd_w(a, b) +pub fn lasx_xvsadd_w(a: v8i32, b: v8i32) -> v8i32 { + unsafe { __lasx_xvsadd_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsadd_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvsadd_d(a, b) +pub fn lasx_xvsadd_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvsadd_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsadd_bu(a: v32u8, b: v32u8) -> v32u8 { - __lasx_xvsadd_bu(a, b) +pub fn lasx_xvsadd_bu(a: v32u8, b: v32u8) -> v32u8 { + unsafe { __lasx_xvsadd_bu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsadd_hu(a: v16u16, b: v16u16) -> v16u16 { - __lasx_xvsadd_hu(a, b) +pub fn lasx_xvsadd_hu(a: v16u16, b: v16u16) -> v16u16 { + unsafe { __lasx_xvsadd_hu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsadd_wu(a: v8u32, b: v8u32) -> v8u32 { - __lasx_xvsadd_wu(a, b) +pub fn lasx_xvsadd_wu(a: v8u32, b: v8u32) -> v8u32 { + unsafe { __lasx_xvsadd_wu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsadd_du(a: v4u64, b: v4u64) -> v4u64 { - __lasx_xvsadd_du(a, b) +pub fn lasx_xvsadd_du(a: v4u64, b: v4u64) -> v4u64 { + unsafe { __lasx_xvsadd_du(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvavg_b(a: v32i8, b: v32i8) -> v32i8 { - __lasx_xvavg_b(a, b) +pub fn lasx_xvavg_b(a: v32i8, b: v32i8) -> v32i8 { + unsafe { __lasx_xvavg_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvavg_h(a: v16i16, b: v16i16) -> v16i16 { - __lasx_xvavg_h(a, b) +pub fn lasx_xvavg_h(a: v16i16, b: v16i16) -> v16i16 { + unsafe { __lasx_xvavg_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvavg_w(a: v8i32, b: v8i32) -> v8i32 { - __lasx_xvavg_w(a, b) +pub fn lasx_xvavg_w(a: v8i32, b: v8i32) -> v8i32 { + unsafe { __lasx_xvavg_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvavg_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvavg_d(a, b) +pub fn lasx_xvavg_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvavg_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvavg_bu(a: v32u8, b: v32u8) -> v32u8 { - __lasx_xvavg_bu(a, b) +pub fn lasx_xvavg_bu(a: v32u8, b: v32u8) -> v32u8 { + unsafe { __lasx_xvavg_bu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvavg_hu(a: v16u16, b: v16u16) -> v16u16 { - __lasx_xvavg_hu(a, b) +pub fn lasx_xvavg_hu(a: v16u16, b: v16u16) -> v16u16 { + unsafe { __lasx_xvavg_hu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn 
lasx_xvavg_wu(a: v8u32, b: v8u32) -> v8u32 { - __lasx_xvavg_wu(a, b) +pub fn lasx_xvavg_wu(a: v8u32, b: v8u32) -> v8u32 { + unsafe { __lasx_xvavg_wu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvavg_du(a: v4u64, b: v4u64) -> v4u64 { - __lasx_xvavg_du(a, b) +pub fn lasx_xvavg_du(a: v4u64, b: v4u64) -> v4u64 { + unsafe { __lasx_xvavg_du(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvavgr_b(a: v32i8, b: v32i8) -> v32i8 { - __lasx_xvavgr_b(a, b) +pub fn lasx_xvavgr_b(a: v32i8, b: v32i8) -> v32i8 { + unsafe { __lasx_xvavgr_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvavgr_h(a: v16i16, b: v16i16) -> v16i16 { - __lasx_xvavgr_h(a, b) +pub fn lasx_xvavgr_h(a: v16i16, b: v16i16) -> v16i16 { + unsafe { __lasx_xvavgr_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvavgr_w(a: v8i32, b: v8i32) -> v8i32 { - __lasx_xvavgr_w(a, b) +pub fn lasx_xvavgr_w(a: v8i32, b: v8i32) -> v8i32 { + unsafe { __lasx_xvavgr_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvavgr_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvavgr_d(a, b) +pub fn lasx_xvavgr_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvavgr_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvavgr_bu(a: v32u8, b: v32u8) -> v32u8 { - __lasx_xvavgr_bu(a, b) +pub fn lasx_xvavgr_bu(a: v32u8, b: v32u8) -> v32u8 { + unsafe { __lasx_xvavgr_bu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvavgr_hu(a: v16u16, b: v16u16) -> v16u16 { - __lasx_xvavgr_hu(a, b) +pub fn lasx_xvavgr_hu(a: v16u16, b: v16u16) -> v16u16 { + unsafe { __lasx_xvavgr_hu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvavgr_wu(a: v8u32, b: v8u32) -> v8u32 { - __lasx_xvavgr_wu(a, b) +pub fn lasx_xvavgr_wu(a: v8u32, b: v8u32) -> v8u32 { + unsafe { __lasx_xvavgr_wu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvavgr_du(a: v4u64, b: v4u64) -> v4u64 { - __lasx_xvavgr_du(a, b) +pub fn lasx_xvavgr_du(a: v4u64, b: v4u64) -> v4u64 { + unsafe { __lasx_xvavgr_du(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssub_b(a: v32i8, b: v32i8) -> v32i8 { - __lasx_xvssub_b(a, b) +pub fn lasx_xvssub_b(a: v32i8, b: v32i8) -> v32i8 { + unsafe { __lasx_xvssub_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssub_h(a: v16i16, b: v16i16) -> v16i16 { - __lasx_xvssub_h(a, b) +pub fn lasx_xvssub_h(a: v16i16, b: v16i16) -> v16i16 { + unsafe { __lasx_xvssub_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssub_w(a: v8i32, b: v8i32) -> v8i32 { - __lasx_xvssub_w(a, b) +pub fn lasx_xvssub_w(a: v8i32, b: v8i32) -> v8i32 { + unsafe { __lasx_xvssub_w(a, b) } } 
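(Illustrative aside, not part of the patch.) The saturating add/subtract wrappers compose the same way; a hedged caller sketch, again assuming the vector types and the wrappers from this hunk are in scope:

    #[target_feature(enable = "lasx")]
    fn saturating_delta_plus(a: v16i16, b: v16i16) -> v16i16 {
        // Hypothetical helper: saturating subtract, then saturating add back,
        // both through the now-safe wrappers shown above.
        let d = lasx_xvssub_h(a, b);
        lasx_xvsadd_h(d, b)
    }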
#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssub_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvssub_d(a, b) +pub fn lasx_xvssub_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvssub_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssub_bu(a: v32u8, b: v32u8) -> v32u8 { - __lasx_xvssub_bu(a, b) +pub fn lasx_xvssub_bu(a: v32u8, b: v32u8) -> v32u8 { + unsafe { __lasx_xvssub_bu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssub_hu(a: v16u16, b: v16u16) -> v16u16 { - __lasx_xvssub_hu(a, b) +pub fn lasx_xvssub_hu(a: v16u16, b: v16u16) -> v16u16 { + unsafe { __lasx_xvssub_hu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssub_wu(a: v8u32, b: v8u32) -> v8u32 { - __lasx_xvssub_wu(a, b) +pub fn lasx_xvssub_wu(a: v8u32, b: v8u32) -> v8u32 { + unsafe { __lasx_xvssub_wu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssub_du(a: v4u64, b: v4u64) -> v4u64 { - __lasx_xvssub_du(a, b) +pub fn lasx_xvssub_du(a: v4u64, b: v4u64) -> v4u64 { + unsafe { __lasx_xvssub_du(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvabsd_b(a: v32i8, b: v32i8) -> v32i8 { - __lasx_xvabsd_b(a, b) +pub fn lasx_xvabsd_b(a: v32i8, b: v32i8) -> v32i8 { + unsafe { __lasx_xvabsd_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvabsd_h(a: v16i16, b: v16i16) -> v16i16 { - __lasx_xvabsd_h(a, b) +pub fn lasx_xvabsd_h(a: v16i16, b: v16i16) -> v16i16 { + unsafe { __lasx_xvabsd_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvabsd_w(a: v8i32, b: v8i32) -> v8i32 { - __lasx_xvabsd_w(a, b) +pub fn lasx_xvabsd_w(a: v8i32, b: v8i32) -> v8i32 { + unsafe { __lasx_xvabsd_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvabsd_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvabsd_d(a, b) +pub fn lasx_xvabsd_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvabsd_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvabsd_bu(a: v32u8, b: v32u8) -> v32u8 { - __lasx_xvabsd_bu(a, b) +pub fn lasx_xvabsd_bu(a: v32u8, b: v32u8) -> v32u8 { + unsafe { __lasx_xvabsd_bu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvabsd_hu(a: v16u16, b: v16u16) -> v16u16 { - __lasx_xvabsd_hu(a, b) +pub fn lasx_xvabsd_hu(a: v16u16, b: v16u16) -> v16u16 { + unsafe { __lasx_xvabsd_hu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvabsd_wu(a: v8u32, b: v8u32) -> v8u32 { - __lasx_xvabsd_wu(a, b) +pub fn lasx_xvabsd_wu(a: v8u32, b: v8u32) -> v8u32 { + unsafe { __lasx_xvabsd_wu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvabsd_du(a: v4u64, b: 
v4u64) -> v4u64 { - __lasx_xvabsd_du(a, b) +pub fn lasx_xvabsd_du(a: v4u64, b: v4u64) -> v4u64 { + unsafe { __lasx_xvabsd_du(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmul_b(a: v32i8, b: v32i8) -> v32i8 { - __lasx_xvmul_b(a, b) +pub fn lasx_xvmul_b(a: v32i8, b: v32i8) -> v32i8 { + unsafe { __lasx_xvmul_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmul_h(a: v16i16, b: v16i16) -> v16i16 { - __lasx_xvmul_h(a, b) +pub fn lasx_xvmul_h(a: v16i16, b: v16i16) -> v16i16 { + unsafe { __lasx_xvmul_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmul_w(a: v8i32, b: v8i32) -> v8i32 { - __lasx_xvmul_w(a, b) +pub fn lasx_xvmul_w(a: v8i32, b: v8i32) -> v8i32 { + unsafe { __lasx_xvmul_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmul_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvmul_d(a, b) +pub fn lasx_xvmul_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvmul_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmadd_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8 { - __lasx_xvmadd_b(a, b, c) +pub fn lasx_xvmadd_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8 { + unsafe { __lasx_xvmadd_b(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmadd_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16 { - __lasx_xvmadd_h(a, b, c) +pub fn lasx_xvmadd_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16 { + unsafe { __lasx_xvmadd_h(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmadd_w(a: v8i32, b: v8i32, c: v8i32) -> v8i32 { - __lasx_xvmadd_w(a, b, c) +pub fn lasx_xvmadd_w(a: v8i32, b: v8i32, c: v8i32) -> v8i32 { + unsafe { __lasx_xvmadd_w(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmadd_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64 { - __lasx_xvmadd_d(a, b, c) +pub fn lasx_xvmadd_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64 { + unsafe { __lasx_xvmadd_d(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmsub_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8 { - __lasx_xvmsub_b(a, b, c) +pub fn lasx_xvmsub_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8 { + unsafe { __lasx_xvmsub_b(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmsub_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16 { - __lasx_xvmsub_h(a, b, c) +pub fn lasx_xvmsub_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16 { + unsafe { __lasx_xvmsub_h(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmsub_w(a: v8i32, b: v8i32, c: v8i32) -> v8i32 { - __lasx_xvmsub_w(a, b, c) +pub fn lasx_xvmsub_w(a: v8i32, b: v8i32, c: v8i32) -> v8i32 { + unsafe { __lasx_xvmsub_w(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmsub_d(a: v4i64, b: 
v4i64, c: v4i64) -> v4i64 { - __lasx_xvmsub_d(a, b, c) +pub fn lasx_xvmsub_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64 { + unsafe { __lasx_xvmsub_d(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvdiv_b(a: v32i8, b: v32i8) -> v32i8 { - __lasx_xvdiv_b(a, b) +pub fn lasx_xvdiv_b(a: v32i8, b: v32i8) -> v32i8 { + unsafe { __lasx_xvdiv_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvdiv_h(a: v16i16, b: v16i16) -> v16i16 { - __lasx_xvdiv_h(a, b) +pub fn lasx_xvdiv_h(a: v16i16, b: v16i16) -> v16i16 { + unsafe { __lasx_xvdiv_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvdiv_w(a: v8i32, b: v8i32) -> v8i32 { - __lasx_xvdiv_w(a, b) +pub fn lasx_xvdiv_w(a: v8i32, b: v8i32) -> v8i32 { + unsafe { __lasx_xvdiv_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvdiv_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvdiv_d(a, b) +pub fn lasx_xvdiv_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvdiv_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvdiv_bu(a: v32u8, b: v32u8) -> v32u8 { - __lasx_xvdiv_bu(a, b) +pub fn lasx_xvdiv_bu(a: v32u8, b: v32u8) -> v32u8 { + unsafe { __lasx_xvdiv_bu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvdiv_hu(a: v16u16, b: v16u16) -> v16u16 { - __lasx_xvdiv_hu(a, b) +pub fn lasx_xvdiv_hu(a: v16u16, b: v16u16) -> v16u16 { + unsafe { __lasx_xvdiv_hu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvdiv_wu(a: v8u32, b: v8u32) -> v8u32 { - __lasx_xvdiv_wu(a, b) +pub fn lasx_xvdiv_wu(a: v8u32, b: v8u32) -> v8u32 { + unsafe { __lasx_xvdiv_wu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvdiv_du(a: v4u64, b: v4u64) -> v4u64 { - __lasx_xvdiv_du(a, b) +pub fn lasx_xvdiv_du(a: v4u64, b: v4u64) -> v4u64 { + unsafe { __lasx_xvdiv_du(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvhaddw_h_b(a: v32i8, b: v32i8) -> v16i16 { - __lasx_xvhaddw_h_b(a, b) +pub fn lasx_xvhaddw_h_b(a: v32i8, b: v32i8) -> v16i16 { + unsafe { __lasx_xvhaddw_h_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvhaddw_w_h(a: v16i16, b: v16i16) -> v8i32 { - __lasx_xvhaddw_w_h(a, b) +pub fn lasx_xvhaddw_w_h(a: v16i16, b: v16i16) -> v8i32 { + unsafe { __lasx_xvhaddw_w_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvhaddw_d_w(a: v8i32, b: v8i32) -> v4i64 { - __lasx_xvhaddw_d_w(a, b) +pub fn lasx_xvhaddw_d_w(a: v8i32, b: v8i32) -> v4i64 { + unsafe { __lasx_xvhaddw_d_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvhaddw_hu_bu(a: v32u8, b: v32u8) -> v16u16 { - __lasx_xvhaddw_hu_bu(a, b) +pub fn lasx_xvhaddw_hu_bu(a: v32u8, b: v32u8) -> v16u16 { + unsafe { 
__lasx_xvhaddw_hu_bu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvhaddw_wu_hu(a: v16u16, b: v16u16) -> v8u32 { - __lasx_xvhaddw_wu_hu(a, b) +pub fn lasx_xvhaddw_wu_hu(a: v16u16, b: v16u16) -> v8u32 { + unsafe { __lasx_xvhaddw_wu_hu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvhaddw_du_wu(a: v8u32, b: v8u32) -> v4u64 { - __lasx_xvhaddw_du_wu(a, b) +pub fn lasx_xvhaddw_du_wu(a: v8u32, b: v8u32) -> v4u64 { + unsafe { __lasx_xvhaddw_du_wu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvhsubw_h_b(a: v32i8, b: v32i8) -> v16i16 { - __lasx_xvhsubw_h_b(a, b) +pub fn lasx_xvhsubw_h_b(a: v32i8, b: v32i8) -> v16i16 { + unsafe { __lasx_xvhsubw_h_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvhsubw_w_h(a: v16i16, b: v16i16) -> v8i32 { - __lasx_xvhsubw_w_h(a, b) +pub fn lasx_xvhsubw_w_h(a: v16i16, b: v16i16) -> v8i32 { + unsafe { __lasx_xvhsubw_w_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvhsubw_d_w(a: v8i32, b: v8i32) -> v4i64 { - __lasx_xvhsubw_d_w(a, b) +pub fn lasx_xvhsubw_d_w(a: v8i32, b: v8i32) -> v4i64 { + unsafe { __lasx_xvhsubw_d_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvhsubw_hu_bu(a: v32u8, b: v32u8) -> v16i16 { - __lasx_xvhsubw_hu_bu(a, b) +pub fn lasx_xvhsubw_hu_bu(a: v32u8, b: v32u8) -> v16i16 { + unsafe { __lasx_xvhsubw_hu_bu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvhsubw_wu_hu(a: v16u16, b: v16u16) -> v8i32 { - __lasx_xvhsubw_wu_hu(a, b) +pub fn lasx_xvhsubw_wu_hu(a: v16u16, b: v16u16) -> v8i32 { + unsafe { __lasx_xvhsubw_wu_hu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvhsubw_du_wu(a: v8u32, b: v8u32) -> v4i64 { - __lasx_xvhsubw_du_wu(a, b) +pub fn lasx_xvhsubw_du_wu(a: v8u32, b: v8u32) -> v4i64 { + unsafe { __lasx_xvhsubw_du_wu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmod_b(a: v32i8, b: v32i8) -> v32i8 { - __lasx_xvmod_b(a, b) +pub fn lasx_xvmod_b(a: v32i8, b: v32i8) -> v32i8 { + unsafe { __lasx_xvmod_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmod_h(a: v16i16, b: v16i16) -> v16i16 { - __lasx_xvmod_h(a, b) +pub fn lasx_xvmod_h(a: v16i16, b: v16i16) -> v16i16 { + unsafe { __lasx_xvmod_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmod_w(a: v8i32, b: v8i32) -> v8i32 { - __lasx_xvmod_w(a, b) +pub fn lasx_xvmod_w(a: v8i32, b: v8i32) -> v8i32 { + unsafe { __lasx_xvmod_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmod_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvmod_d(a, b) +pub fn lasx_xvmod_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvmod_d(a, b) } } #[inline] 
#[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmod_bu(a: v32u8, b: v32u8) -> v32u8 { - __lasx_xvmod_bu(a, b) +pub fn lasx_xvmod_bu(a: v32u8, b: v32u8) -> v32u8 { + unsafe { __lasx_xvmod_bu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmod_hu(a: v16u16, b: v16u16) -> v16u16 { - __lasx_xvmod_hu(a, b) +pub fn lasx_xvmod_hu(a: v16u16, b: v16u16) -> v16u16 { + unsafe { __lasx_xvmod_hu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmod_wu(a: v8u32, b: v8u32) -> v8u32 { - __lasx_xvmod_wu(a, b) +pub fn lasx_xvmod_wu(a: v8u32, b: v8u32) -> v8u32 { + unsafe { __lasx_xvmod_wu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmod_du(a: v4u64, b: v4u64) -> v4u64 { - __lasx_xvmod_du(a, b) +pub fn lasx_xvmod_du(a: v4u64, b: v4u64) -> v4u64 { + unsafe { __lasx_xvmod_du(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvrepl128vei_b(a: v32i8) -> v32i8 { +pub fn lasx_xvrepl128vei_b(a: v32i8) -> v32i8 { static_assert_uimm_bits!(IMM4, 4); - __lasx_xvrepl128vei_b(a, IMM4) + unsafe { __lasx_xvrepl128vei_b(a, IMM4) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvrepl128vei_h(a: v16i16) -> v16i16 { +pub fn lasx_xvrepl128vei_h(a: v16i16) -> v16i16 { static_assert_uimm_bits!(IMM3, 3); - __lasx_xvrepl128vei_h(a, IMM3) + unsafe { __lasx_xvrepl128vei_h(a, IMM3) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvrepl128vei_w(a: v8i32) -> v8i32 { +pub fn lasx_xvrepl128vei_w(a: v8i32) -> v8i32 { static_assert_uimm_bits!(IMM2, 2); - __lasx_xvrepl128vei_w(a, IMM2) + unsafe { __lasx_xvrepl128vei_w(a, IMM2) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvrepl128vei_d(a: v4i64) -> v4i64 { +pub fn lasx_xvrepl128vei_d(a: v4i64) -> v4i64 { static_assert_uimm_bits!(IMM1, 1); - __lasx_xvrepl128vei_d(a, IMM1) + unsafe { __lasx_xvrepl128vei_d(a, IMM1) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvpickev_b(a: v32i8, b: v32i8) -> v32i8 { - __lasx_xvpickev_b(a, b) +pub fn lasx_xvpickev_b(a: v32i8, b: v32i8) -> v32i8 { + unsafe { __lasx_xvpickev_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvpickev_h(a: v16i16, b: v16i16) -> v16i16 { - __lasx_xvpickev_h(a, b) +pub fn lasx_xvpickev_h(a: v16i16, b: v16i16) -> v16i16 { + unsafe { __lasx_xvpickev_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvpickev_w(a: v8i32, b: v8i32) -> v8i32 { - __lasx_xvpickev_w(a, b) +pub fn lasx_xvpickev_w(a: v8i32, b: v8i32) -> v8i32 { + unsafe { __lasx_xvpickev_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn 
lasx_xvpickev_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvpickev_d(a, b) +pub fn lasx_xvpickev_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvpickev_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvpickod_b(a: v32i8, b: v32i8) -> v32i8 { - __lasx_xvpickod_b(a, b) +pub fn lasx_xvpickod_b(a: v32i8, b: v32i8) -> v32i8 { + unsafe { __lasx_xvpickod_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvpickod_h(a: v16i16, b: v16i16) -> v16i16 { - __lasx_xvpickod_h(a, b) +pub fn lasx_xvpickod_h(a: v16i16, b: v16i16) -> v16i16 { + unsafe { __lasx_xvpickod_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvpickod_w(a: v8i32, b: v8i32) -> v8i32 { - __lasx_xvpickod_w(a, b) +pub fn lasx_xvpickod_w(a: v8i32, b: v8i32) -> v8i32 { + unsafe { __lasx_xvpickod_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvpickod_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvpickod_d(a, b) +pub fn lasx_xvpickod_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvpickod_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvilvh_b(a: v32i8, b: v32i8) -> v32i8 { - __lasx_xvilvh_b(a, b) +pub fn lasx_xvilvh_b(a: v32i8, b: v32i8) -> v32i8 { + unsafe { __lasx_xvilvh_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvilvh_h(a: v16i16, b: v16i16) -> v16i16 { - __lasx_xvilvh_h(a, b) +pub fn lasx_xvilvh_h(a: v16i16, b: v16i16) -> v16i16 { + unsafe { __lasx_xvilvh_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvilvh_w(a: v8i32, b: v8i32) -> v8i32 { - __lasx_xvilvh_w(a, b) +pub fn lasx_xvilvh_w(a: v8i32, b: v8i32) -> v8i32 { + unsafe { __lasx_xvilvh_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvilvh_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvilvh_d(a, b) +pub fn lasx_xvilvh_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvilvh_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvilvl_b(a: v32i8, b: v32i8) -> v32i8 { - __lasx_xvilvl_b(a, b) +pub fn lasx_xvilvl_b(a: v32i8, b: v32i8) -> v32i8 { + unsafe { __lasx_xvilvl_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvilvl_h(a: v16i16, b: v16i16) -> v16i16 { - __lasx_xvilvl_h(a, b) +pub fn lasx_xvilvl_h(a: v16i16, b: v16i16) -> v16i16 { + unsafe { __lasx_xvilvl_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvilvl_w(a: v8i32, b: v8i32) -> v8i32 { - __lasx_xvilvl_w(a, b) +pub fn lasx_xvilvl_w(a: v8i32, b: v8i32) -> v8i32 { + unsafe { __lasx_xvilvl_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvilvl_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvilvl_d(a, b) +pub fn lasx_xvilvl_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { 
__lasx_xvilvl_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvpackev_b(a: v32i8, b: v32i8) -> v32i8 { - __lasx_xvpackev_b(a, b) +pub fn lasx_xvpackev_b(a: v32i8, b: v32i8) -> v32i8 { + unsafe { __lasx_xvpackev_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvpackev_h(a: v16i16, b: v16i16) -> v16i16 { - __lasx_xvpackev_h(a, b) +pub fn lasx_xvpackev_h(a: v16i16, b: v16i16) -> v16i16 { + unsafe { __lasx_xvpackev_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvpackev_w(a: v8i32, b: v8i32) -> v8i32 { - __lasx_xvpackev_w(a, b) +pub fn lasx_xvpackev_w(a: v8i32, b: v8i32) -> v8i32 { + unsafe { __lasx_xvpackev_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvpackev_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvpackev_d(a, b) +pub fn lasx_xvpackev_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvpackev_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvpackod_b(a: v32i8, b: v32i8) -> v32i8 { - __lasx_xvpackod_b(a, b) +pub fn lasx_xvpackod_b(a: v32i8, b: v32i8) -> v32i8 { + unsafe { __lasx_xvpackod_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvpackod_h(a: v16i16, b: v16i16) -> v16i16 { - __lasx_xvpackod_h(a, b) +pub fn lasx_xvpackod_h(a: v16i16, b: v16i16) -> v16i16 { + unsafe { __lasx_xvpackod_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvpackod_w(a: v8i32, b: v8i32) -> v8i32 { - __lasx_xvpackod_w(a, b) +pub fn lasx_xvpackod_w(a: v8i32, b: v8i32) -> v8i32 { + unsafe { __lasx_xvpackod_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvpackod_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvpackod_d(a, b) +pub fn lasx_xvpackod_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvpackod_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvshuf_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8 { - __lasx_xvshuf_b(a, b, c) +pub fn lasx_xvshuf_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8 { + unsafe { __lasx_xvshuf_b(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvshuf_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16 { - __lasx_xvshuf_h(a, b, c) +pub fn lasx_xvshuf_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16 { + unsafe { __lasx_xvshuf_h(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvshuf_w(a: v8i32, b: v8i32, c: v8i32) -> v8i32 { - __lasx_xvshuf_w(a, b, c) +pub fn lasx_xvshuf_w(a: v8i32, b: v8i32, c: v8i32) -> v8i32 { + unsafe { __lasx_xvshuf_w(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvshuf_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64 { - __lasx_xvshuf_d(a, b, c) +pub fn lasx_xvshuf_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64 { + unsafe { 
__lasx_xvshuf_d(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvand_v(a: v32u8, b: v32u8) -> v32u8 { - __lasx_xvand_v(a, b) +pub fn lasx_xvand_v(a: v32u8, b: v32u8) -> v32u8 { + unsafe { __lasx_xvand_v(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvandi_b(a: v32u8) -> v32u8 { +pub fn lasx_xvandi_b(a: v32u8) -> v32u8 { static_assert_uimm_bits!(IMM8, 8); - __lasx_xvandi_b(a, IMM8) + unsafe { __lasx_xvandi_b(a, IMM8) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvor_v(a: v32u8, b: v32u8) -> v32u8 { - __lasx_xvor_v(a, b) +pub fn lasx_xvor_v(a: v32u8, b: v32u8) -> v32u8 { + unsafe { __lasx_xvor_v(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvori_b(a: v32u8) -> v32u8 { +pub fn lasx_xvori_b(a: v32u8) -> v32u8 { static_assert_uimm_bits!(IMM8, 8); - __lasx_xvori_b(a, IMM8) + unsafe { __lasx_xvori_b(a, IMM8) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvnor_v(a: v32u8, b: v32u8) -> v32u8 { - __lasx_xvnor_v(a, b) +pub fn lasx_xvnor_v(a: v32u8, b: v32u8) -> v32u8 { + unsafe { __lasx_xvnor_v(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvnori_b(a: v32u8) -> v32u8 { +pub fn lasx_xvnori_b(a: v32u8) -> v32u8 { static_assert_uimm_bits!(IMM8, 8); - __lasx_xvnori_b(a, IMM8) + unsafe { __lasx_xvnori_b(a, IMM8) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvxor_v(a: v32u8, b: v32u8) -> v32u8 { - __lasx_xvxor_v(a, b) +pub fn lasx_xvxor_v(a: v32u8, b: v32u8) -> v32u8 { + unsafe { __lasx_xvxor_v(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvxori_b(a: v32u8) -> v32u8 { +pub fn lasx_xvxori_b(a: v32u8) -> v32u8 { static_assert_uimm_bits!(IMM8, 8); - __lasx_xvxori_b(a, IMM8) + unsafe { __lasx_xvxori_b(a, IMM8) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvbitsel_v(a: v32u8, b: v32u8, c: v32u8) -> v32u8 { - __lasx_xvbitsel_v(a, b, c) +pub fn lasx_xvbitsel_v(a: v32u8, b: v32u8, c: v32u8) -> v32u8 { + unsafe { __lasx_xvbitsel_v(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvbitseli_b(a: v32u8, b: v32u8) -> v32u8 { +pub fn lasx_xvbitseli_b(a: v32u8, b: v32u8) -> v32u8 { static_assert_uimm_bits!(IMM8, 8); - __lasx_xvbitseli_b(a, b, IMM8) + unsafe { __lasx_xvbitseli_b(a, b, IMM8) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvshuf4i_b(a: v32i8) -> v32i8 { +pub fn lasx_xvshuf4i_b(a: v32i8) -> v32i8 { static_assert_uimm_bits!(IMM8, 8); - __lasx_xvshuf4i_b(a, IMM8) + unsafe { __lasx_xvshuf4i_b(a, IMM8) } } #[inline] #[target_feature(enable = "lasx")] 
#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvshuf4i_h(a: v16i16) -> v16i16 { +pub fn lasx_xvshuf4i_h(a: v16i16) -> v16i16 { static_assert_uimm_bits!(IMM8, 8); - __lasx_xvshuf4i_h(a, IMM8) + unsafe { __lasx_xvshuf4i_h(a, IMM8) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvshuf4i_w(a: v8i32) -> v8i32 { +pub fn lasx_xvshuf4i_w(a: v8i32) -> v8i32 { static_assert_uimm_bits!(IMM8, 8); - __lasx_xvshuf4i_w(a, IMM8) + unsafe { __lasx_xvshuf4i_w(a, IMM8) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvreplgr2vr_b(a: i32) -> v32i8 { - __lasx_xvreplgr2vr_b(a) +pub fn lasx_xvreplgr2vr_b(a: i32) -> v32i8 { + unsafe { __lasx_xvreplgr2vr_b(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvreplgr2vr_h(a: i32) -> v16i16 { - __lasx_xvreplgr2vr_h(a) +pub fn lasx_xvreplgr2vr_h(a: i32) -> v16i16 { + unsafe { __lasx_xvreplgr2vr_h(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvreplgr2vr_w(a: i32) -> v8i32 { - __lasx_xvreplgr2vr_w(a) +pub fn lasx_xvreplgr2vr_w(a: i32) -> v8i32 { + unsafe { __lasx_xvreplgr2vr_w(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvreplgr2vr_d(a: i64) -> v4i64 { - __lasx_xvreplgr2vr_d(a) +pub fn lasx_xvreplgr2vr_d(a: i64) -> v4i64 { + unsafe { __lasx_xvreplgr2vr_d(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvpcnt_b(a: v32i8) -> v32i8 { - __lasx_xvpcnt_b(a) +pub fn lasx_xvpcnt_b(a: v32i8) -> v32i8 { + unsafe { __lasx_xvpcnt_b(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvpcnt_h(a: v16i16) -> v16i16 { - __lasx_xvpcnt_h(a) +pub fn lasx_xvpcnt_h(a: v16i16) -> v16i16 { + unsafe { __lasx_xvpcnt_h(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvpcnt_w(a: v8i32) -> v8i32 { - __lasx_xvpcnt_w(a) +pub fn lasx_xvpcnt_w(a: v8i32) -> v8i32 { + unsafe { __lasx_xvpcnt_w(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvpcnt_d(a: v4i64) -> v4i64 { - __lasx_xvpcnt_d(a) +pub fn lasx_xvpcnt_d(a: v4i64) -> v4i64 { + unsafe { __lasx_xvpcnt_d(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvclo_b(a: v32i8) -> v32i8 { - __lasx_xvclo_b(a) +pub fn lasx_xvclo_b(a: v32i8) -> v32i8 { + unsafe { __lasx_xvclo_b(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvclo_h(a: v16i16) -> v16i16 { - __lasx_xvclo_h(a) +pub fn lasx_xvclo_h(a: v16i16) -> v16i16 { + unsafe { __lasx_xvclo_h(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvclo_w(a: v8i32) -> v8i32 { - __lasx_xvclo_w(a) +pub fn lasx_xvclo_w(a: v8i32) -> v8i32 { + unsafe { __lasx_xvclo_w(a) } } #[inline] #[target_feature(enable = "lasx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvclo_d(a: v4i64) -> v4i64 { - __lasx_xvclo_d(a) +pub fn lasx_xvclo_d(a: v4i64) -> v4i64 { + unsafe { __lasx_xvclo_d(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvclz_b(a: v32i8) -> v32i8 { - __lasx_xvclz_b(a) +pub fn lasx_xvclz_b(a: v32i8) -> v32i8 { + unsafe { __lasx_xvclz_b(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvclz_h(a: v16i16) -> v16i16 { - __lasx_xvclz_h(a) +pub fn lasx_xvclz_h(a: v16i16) -> v16i16 { + unsafe { __lasx_xvclz_h(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvclz_w(a: v8i32) -> v8i32 { - __lasx_xvclz_w(a) +pub fn lasx_xvclz_w(a: v8i32) -> v8i32 { + unsafe { __lasx_xvclz_w(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvclz_d(a: v4i64) -> v4i64 { - __lasx_xvclz_d(a) +pub fn lasx_xvclz_d(a: v4i64) -> v4i64 { + unsafe { __lasx_xvclz_d(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfadd_s(a: v8f32, b: v8f32) -> v8f32 { - __lasx_xvfadd_s(a, b) +pub fn lasx_xvfadd_s(a: v8f32, b: v8f32) -> v8f32 { + unsafe { __lasx_xvfadd_s(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfadd_d(a: v4f64, b: v4f64) -> v4f64 { - __lasx_xvfadd_d(a, b) +pub fn lasx_xvfadd_d(a: v4f64, b: v4f64) -> v4f64 { + unsafe { __lasx_xvfadd_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfsub_s(a: v8f32, b: v8f32) -> v8f32 { - __lasx_xvfsub_s(a, b) +pub fn lasx_xvfsub_s(a: v8f32, b: v8f32) -> v8f32 { + unsafe { __lasx_xvfsub_s(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfsub_d(a: v4f64, b: v4f64) -> v4f64 { - __lasx_xvfsub_d(a, b) +pub fn lasx_xvfsub_d(a: v4f64, b: v4f64) -> v4f64 { + unsafe { __lasx_xvfsub_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfmul_s(a: v8f32, b: v8f32) -> v8f32 { - __lasx_xvfmul_s(a, b) +pub fn lasx_xvfmul_s(a: v8f32, b: v8f32) -> v8f32 { + unsafe { __lasx_xvfmul_s(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfmul_d(a: v4f64, b: v4f64) -> v4f64 { - __lasx_xvfmul_d(a, b) +pub fn lasx_xvfmul_d(a: v4f64, b: v4f64) -> v4f64 { + unsafe { __lasx_xvfmul_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfdiv_s(a: v8f32, b: v8f32) -> v8f32 { - __lasx_xvfdiv_s(a, b) +pub fn lasx_xvfdiv_s(a: v8f32, b: v8f32) -> v8f32 { + unsafe { __lasx_xvfdiv_s(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfdiv_d(a: v4f64, b: v4f64) -> v4f64 { - __lasx_xvfdiv_d(a, b) +pub fn lasx_xvfdiv_d(a: v4f64, b: v4f64) -> v4f64 { + unsafe { __lasx_xvfdiv_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] 
-pub unsafe fn lasx_xvfcvt_h_s(a: v8f32, b: v8f32) -> v16i16 { - __lasx_xvfcvt_h_s(a, b) +pub fn lasx_xvfcvt_h_s(a: v8f32, b: v8f32) -> v16i16 { + unsafe { __lasx_xvfcvt_h_s(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcvt_s_d(a: v4f64, b: v4f64) -> v8f32 { - __lasx_xvfcvt_s_d(a, b) +pub fn lasx_xvfcvt_s_d(a: v4f64, b: v4f64) -> v8f32 { + unsafe { __lasx_xvfcvt_s_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfmin_s(a: v8f32, b: v8f32) -> v8f32 { - __lasx_xvfmin_s(a, b) +pub fn lasx_xvfmin_s(a: v8f32, b: v8f32) -> v8f32 { + unsafe { __lasx_xvfmin_s(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfmin_d(a: v4f64, b: v4f64) -> v4f64 { - __lasx_xvfmin_d(a, b) +pub fn lasx_xvfmin_d(a: v4f64, b: v4f64) -> v4f64 { + unsafe { __lasx_xvfmin_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfmina_s(a: v8f32, b: v8f32) -> v8f32 { - __lasx_xvfmina_s(a, b) +pub fn lasx_xvfmina_s(a: v8f32, b: v8f32) -> v8f32 { + unsafe { __lasx_xvfmina_s(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfmina_d(a: v4f64, b: v4f64) -> v4f64 { - __lasx_xvfmina_d(a, b) +pub fn lasx_xvfmina_d(a: v4f64, b: v4f64) -> v4f64 { + unsafe { __lasx_xvfmina_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfmax_s(a: v8f32, b: v8f32) -> v8f32 { - __lasx_xvfmax_s(a, b) +pub fn lasx_xvfmax_s(a: v8f32, b: v8f32) -> v8f32 { + unsafe { __lasx_xvfmax_s(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfmax_d(a: v4f64, b: v4f64) -> v4f64 { - __lasx_xvfmax_d(a, b) +pub fn lasx_xvfmax_d(a: v4f64, b: v4f64) -> v4f64 { + unsafe { __lasx_xvfmax_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfmaxa_s(a: v8f32, b: v8f32) -> v8f32 { - __lasx_xvfmaxa_s(a, b) +pub fn lasx_xvfmaxa_s(a: v8f32, b: v8f32) -> v8f32 { + unsafe { __lasx_xvfmaxa_s(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfmaxa_d(a: v4f64, b: v4f64) -> v4f64 { - __lasx_xvfmaxa_d(a, b) +pub fn lasx_xvfmaxa_d(a: v4f64, b: v4f64) -> v4f64 { + unsafe { __lasx_xvfmaxa_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfclass_s(a: v8f32) -> v8i32 { - __lasx_xvfclass_s(a) +pub fn lasx_xvfclass_s(a: v8f32) -> v8i32 { + unsafe { __lasx_xvfclass_s(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfclass_d(a: v4f64) -> v4i64 { - __lasx_xvfclass_d(a) +pub fn lasx_xvfclass_d(a: v4f64) -> v4i64 { + unsafe { __lasx_xvfclass_d(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfsqrt_s(a: v8f32) -> v8f32 { - __lasx_xvfsqrt_s(a) +pub fn lasx_xvfsqrt_s(a: v8f32) -> v8f32 { + unsafe { __lasx_xvfsqrt_s(a) } } #[inline] #[target_feature(enable = "lasx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfsqrt_d(a: v4f64) -> v4f64 { - __lasx_xvfsqrt_d(a) +pub fn lasx_xvfsqrt_d(a: v4f64) -> v4f64 { + unsafe { __lasx_xvfsqrt_d(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfrecip_s(a: v8f32) -> v8f32 { - __lasx_xvfrecip_s(a) +pub fn lasx_xvfrecip_s(a: v8f32) -> v8f32 { + unsafe { __lasx_xvfrecip_s(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfrecip_d(a: v4f64) -> v4f64 { - __lasx_xvfrecip_d(a) +pub fn lasx_xvfrecip_d(a: v4f64) -> v4f64 { + unsafe { __lasx_xvfrecip_d(a) } } #[inline] #[target_feature(enable = "lasx,frecipe")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfrecipe_s(a: v8f32) -> v8f32 { - __lasx_xvfrecipe_s(a) +pub fn lasx_xvfrecipe_s(a: v8f32) -> v8f32 { + unsafe { __lasx_xvfrecipe_s(a) } } #[inline] #[target_feature(enable = "lasx,frecipe")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfrecipe_d(a: v4f64) -> v4f64 { - __lasx_xvfrecipe_d(a) +pub fn lasx_xvfrecipe_d(a: v4f64) -> v4f64 { + unsafe { __lasx_xvfrecipe_d(a) } } #[inline] #[target_feature(enable = "lasx,frecipe")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfrsqrte_s(a: v8f32) -> v8f32 { - __lasx_xvfrsqrte_s(a) +pub fn lasx_xvfrsqrte_s(a: v8f32) -> v8f32 { + unsafe { __lasx_xvfrsqrte_s(a) } } #[inline] #[target_feature(enable = "lasx,frecipe")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfrsqrte_d(a: v4f64) -> v4f64 { - __lasx_xvfrsqrte_d(a) +pub fn lasx_xvfrsqrte_d(a: v4f64) -> v4f64 { + unsafe { __lasx_xvfrsqrte_d(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfrint_s(a: v8f32) -> v8f32 { - __lasx_xvfrint_s(a) +pub fn lasx_xvfrint_s(a: v8f32) -> v8f32 { + unsafe { __lasx_xvfrint_s(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfrint_d(a: v4f64) -> v4f64 { - __lasx_xvfrint_d(a) +pub fn lasx_xvfrint_d(a: v4f64) -> v4f64 { + unsafe { __lasx_xvfrint_d(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfrsqrt_s(a: v8f32) -> v8f32 { - __lasx_xvfrsqrt_s(a) +pub fn lasx_xvfrsqrt_s(a: v8f32) -> v8f32 { + unsafe { __lasx_xvfrsqrt_s(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfrsqrt_d(a: v4f64) -> v4f64 { - __lasx_xvfrsqrt_d(a) +pub fn lasx_xvfrsqrt_d(a: v4f64) -> v4f64 { + unsafe { __lasx_xvfrsqrt_d(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvflogb_s(a: v8f32) -> v8f32 { - __lasx_xvflogb_s(a) +pub fn lasx_xvflogb_s(a: v8f32) -> v8f32 { + unsafe { __lasx_xvflogb_s(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvflogb_d(a: v4f64) -> v4f64 { - __lasx_xvflogb_d(a) +pub fn lasx_xvflogb_d(a: v4f64) -> v4f64 { + unsafe { __lasx_xvflogb_d(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcvth_s_h(a: v16i16) -> v8f32 { - 
__lasx_xvfcvth_s_h(a) +pub fn lasx_xvfcvth_s_h(a: v16i16) -> v8f32 { + unsafe { __lasx_xvfcvth_s_h(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcvth_d_s(a: v8f32) -> v4f64 { - __lasx_xvfcvth_d_s(a) +pub fn lasx_xvfcvth_d_s(a: v8f32) -> v4f64 { + unsafe { __lasx_xvfcvth_d_s(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcvtl_s_h(a: v16i16) -> v8f32 { - __lasx_xvfcvtl_s_h(a) +pub fn lasx_xvfcvtl_s_h(a: v16i16) -> v8f32 { + unsafe { __lasx_xvfcvtl_s_h(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcvtl_d_s(a: v8f32) -> v4f64 { - __lasx_xvfcvtl_d_s(a) +pub fn lasx_xvfcvtl_d_s(a: v8f32) -> v4f64 { + unsafe { __lasx_xvfcvtl_d_s(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvftint_w_s(a: v8f32) -> v8i32 { - __lasx_xvftint_w_s(a) +pub fn lasx_xvftint_w_s(a: v8f32) -> v8i32 { + unsafe { __lasx_xvftint_w_s(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvftint_l_d(a: v4f64) -> v4i64 { - __lasx_xvftint_l_d(a) +pub fn lasx_xvftint_l_d(a: v4f64) -> v4i64 { + unsafe { __lasx_xvftint_l_d(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvftint_wu_s(a: v8f32) -> v8u32 { - __lasx_xvftint_wu_s(a) +pub fn lasx_xvftint_wu_s(a: v8f32) -> v8u32 { + unsafe { __lasx_xvftint_wu_s(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvftint_lu_d(a: v4f64) -> v4u64 { - __lasx_xvftint_lu_d(a) +pub fn lasx_xvftint_lu_d(a: v4f64) -> v4u64 { + unsafe { __lasx_xvftint_lu_d(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvftintrz_w_s(a: v8f32) -> v8i32 { - __lasx_xvftintrz_w_s(a) +pub fn lasx_xvftintrz_w_s(a: v8f32) -> v8i32 { + unsafe { __lasx_xvftintrz_w_s(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvftintrz_l_d(a: v4f64) -> v4i64 { - __lasx_xvftintrz_l_d(a) +pub fn lasx_xvftintrz_l_d(a: v4f64) -> v4i64 { + unsafe { __lasx_xvftintrz_l_d(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvftintrz_wu_s(a: v8f32) -> v8u32 { - __lasx_xvftintrz_wu_s(a) +pub fn lasx_xvftintrz_wu_s(a: v8f32) -> v8u32 { + unsafe { __lasx_xvftintrz_wu_s(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvftintrz_lu_d(a: v4f64) -> v4u64 { - __lasx_xvftintrz_lu_d(a) +pub fn lasx_xvftintrz_lu_d(a: v4f64) -> v4u64 { + unsafe { __lasx_xvftintrz_lu_d(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvffint_s_w(a: v8i32) -> v8f32 { - __lasx_xvffint_s_w(a) +pub fn lasx_xvffint_s_w(a: v8i32) -> v8f32 { + unsafe { __lasx_xvffint_s_w(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvffint_d_l(a: v4i64) -> v4f64 { - __lasx_xvffint_d_l(a) +pub fn 
lasx_xvffint_d_l(a: v4i64) -> v4f64 { + unsafe { __lasx_xvffint_d_l(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvffint_s_wu(a: v8u32) -> v8f32 { - __lasx_xvffint_s_wu(a) +pub fn lasx_xvffint_s_wu(a: v8u32) -> v8f32 { + unsafe { __lasx_xvffint_s_wu(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvffint_d_lu(a: v4u64) -> v4f64 { - __lasx_xvffint_d_lu(a) +pub fn lasx_xvffint_d_lu(a: v4u64) -> v4f64 { + unsafe { __lasx_xvffint_d_lu(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvreplve_b(a: v32i8, b: i32) -> v32i8 { - __lasx_xvreplve_b(a, b) +pub fn lasx_xvreplve_b(a: v32i8, b: i32) -> v32i8 { + unsafe { __lasx_xvreplve_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvreplve_h(a: v16i16, b: i32) -> v16i16 { - __lasx_xvreplve_h(a, b) +pub fn lasx_xvreplve_h(a: v16i16, b: i32) -> v16i16 { + unsafe { __lasx_xvreplve_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvreplve_w(a: v8i32, b: i32) -> v8i32 { - __lasx_xvreplve_w(a, b) +pub fn lasx_xvreplve_w(a: v8i32, b: i32) -> v8i32 { + unsafe { __lasx_xvreplve_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvreplve_d(a: v4i64, b: i32) -> v4i64 { - __lasx_xvreplve_d(a, b) +pub fn lasx_xvreplve_d(a: v4i64, b: i32) -> v4i64 { + unsafe { __lasx_xvreplve_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvpermi_w(a: v8i32, b: v8i32) -> v8i32 { +pub fn lasx_xvpermi_w(a: v8i32, b: v8i32) -> v8i32 { static_assert_uimm_bits!(IMM8, 8); - __lasx_xvpermi_w(a, b, IMM8) + unsafe { __lasx_xvpermi_w(a, b, IMM8) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvandn_v(a: v32u8, b: v32u8) -> v32u8 { - __lasx_xvandn_v(a, b) +pub fn lasx_xvandn_v(a: v32u8, b: v32u8) -> v32u8 { + unsafe { __lasx_xvandn_v(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvneg_b(a: v32i8) -> v32i8 { - __lasx_xvneg_b(a) +pub fn lasx_xvneg_b(a: v32i8) -> v32i8 { + unsafe { __lasx_xvneg_b(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvneg_h(a: v16i16) -> v16i16 { - __lasx_xvneg_h(a) +pub fn lasx_xvneg_h(a: v16i16) -> v16i16 { + unsafe { __lasx_xvneg_h(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvneg_w(a: v8i32) -> v8i32 { - __lasx_xvneg_w(a) +pub fn lasx_xvneg_w(a: v8i32) -> v8i32 { + unsafe { __lasx_xvneg_w(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvneg_d(a: v4i64) -> v4i64 { - __lasx_xvneg_d(a) +pub fn lasx_xvneg_d(a: v4i64) -> v4i64 { + unsafe { __lasx_xvneg_d(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmuh_b(a: v32i8, b: 
v32i8) -> v32i8 { - __lasx_xvmuh_b(a, b) +pub fn lasx_xvmuh_b(a: v32i8, b: v32i8) -> v32i8 { + unsafe { __lasx_xvmuh_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmuh_h(a: v16i16, b: v16i16) -> v16i16 { - __lasx_xvmuh_h(a, b) +pub fn lasx_xvmuh_h(a: v16i16, b: v16i16) -> v16i16 { + unsafe { __lasx_xvmuh_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmuh_w(a: v8i32, b: v8i32) -> v8i32 { - __lasx_xvmuh_w(a, b) +pub fn lasx_xvmuh_w(a: v8i32, b: v8i32) -> v8i32 { + unsafe { __lasx_xvmuh_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmuh_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvmuh_d(a, b) +pub fn lasx_xvmuh_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvmuh_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmuh_bu(a: v32u8, b: v32u8) -> v32u8 { - __lasx_xvmuh_bu(a, b) +pub fn lasx_xvmuh_bu(a: v32u8, b: v32u8) -> v32u8 { + unsafe { __lasx_xvmuh_bu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmuh_hu(a: v16u16, b: v16u16) -> v16u16 { - __lasx_xvmuh_hu(a, b) +pub fn lasx_xvmuh_hu(a: v16u16, b: v16u16) -> v16u16 { + unsafe { __lasx_xvmuh_hu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmuh_wu(a: v8u32, b: v8u32) -> v8u32 { - __lasx_xvmuh_wu(a, b) +pub fn lasx_xvmuh_wu(a: v8u32, b: v8u32) -> v8u32 { + unsafe { __lasx_xvmuh_wu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmuh_du(a: v4u64, b: v4u64) -> v4u64 { - __lasx_xvmuh_du(a, b) +pub fn lasx_xvmuh_du(a: v4u64, b: v4u64) -> v4u64 { + unsafe { __lasx_xvmuh_du(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsllwil_h_b(a: v32i8) -> v16i16 { +pub fn lasx_xvsllwil_h_b(a: v32i8) -> v16i16 { static_assert_uimm_bits!(IMM3, 3); - __lasx_xvsllwil_h_b(a, IMM3) + unsafe { __lasx_xvsllwil_h_b(a, IMM3) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsllwil_w_h(a: v16i16) -> v8i32 { +pub fn lasx_xvsllwil_w_h(a: v16i16) -> v8i32 { static_assert_uimm_bits!(IMM4, 4); - __lasx_xvsllwil_w_h(a, IMM4) + unsafe { __lasx_xvsllwil_w_h(a, IMM4) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsllwil_d_w(a: v8i32) -> v4i64 { +pub fn lasx_xvsllwil_d_w(a: v8i32) -> v4i64 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvsllwil_d_w(a, IMM5) + unsafe { __lasx_xvsllwil_d_w(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsllwil_hu_bu(a: v32u8) -> v16u16 { +pub fn lasx_xvsllwil_hu_bu(a: v32u8) -> v16u16 { static_assert_uimm_bits!(IMM3, 3); - __lasx_xvsllwil_hu_bu(a, IMM3) + unsafe { __lasx_xvsllwil_hu_bu(a, IMM3) } } #[inline] #[target_feature(enable = "lasx")] 
#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsllwil_wu_hu(a: v16u16) -> v8u32 { +pub fn lasx_xvsllwil_wu_hu(a: v16u16) -> v8u32 { static_assert_uimm_bits!(IMM4, 4); - __lasx_xvsllwil_wu_hu(a, IMM4) + unsafe { __lasx_xvsllwil_wu_hu(a, IMM4) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsllwil_du_wu(a: v8u32) -> v4u64 { +pub fn lasx_xvsllwil_du_wu(a: v8u32) -> v4u64 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvsllwil_du_wu(a, IMM5) + unsafe { __lasx_xvsllwil_du_wu(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsran_b_h(a: v16i16, b: v16i16) -> v32i8 { - __lasx_xvsran_b_h(a, b) +pub fn lasx_xvsran_b_h(a: v16i16, b: v16i16) -> v32i8 { + unsafe { __lasx_xvsran_b_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsran_h_w(a: v8i32, b: v8i32) -> v16i16 { - __lasx_xvsran_h_w(a, b) +pub fn lasx_xvsran_h_w(a: v8i32, b: v8i32) -> v16i16 { + unsafe { __lasx_xvsran_h_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsran_w_d(a: v4i64, b: v4i64) -> v8i32 { - __lasx_xvsran_w_d(a, b) +pub fn lasx_xvsran_w_d(a: v4i64, b: v4i64) -> v8i32 { + unsafe { __lasx_xvsran_w_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssran_b_h(a: v16i16, b: v16i16) -> v32i8 { - __lasx_xvssran_b_h(a, b) +pub fn lasx_xvssran_b_h(a: v16i16, b: v16i16) -> v32i8 { + unsafe { __lasx_xvssran_b_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssran_h_w(a: v8i32, b: v8i32) -> v16i16 { - __lasx_xvssran_h_w(a, b) +pub fn lasx_xvssran_h_w(a: v8i32, b: v8i32) -> v16i16 { + unsafe { __lasx_xvssran_h_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssran_w_d(a: v4i64, b: v4i64) -> v8i32 { - __lasx_xvssran_w_d(a, b) +pub fn lasx_xvssran_w_d(a: v4i64, b: v4i64) -> v8i32 { + unsafe { __lasx_xvssran_w_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssran_bu_h(a: v16u16, b: v16u16) -> v32u8 { - __lasx_xvssran_bu_h(a, b) +pub fn lasx_xvssran_bu_h(a: v16u16, b: v16u16) -> v32u8 { + unsafe { __lasx_xvssran_bu_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssran_hu_w(a: v8u32, b: v8u32) -> v16u16 { - __lasx_xvssran_hu_w(a, b) +pub fn lasx_xvssran_hu_w(a: v8u32, b: v8u32) -> v16u16 { + unsafe { __lasx_xvssran_hu_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssran_wu_d(a: v4u64, b: v4u64) -> v8u32 { - __lasx_xvssran_wu_d(a, b) +pub fn lasx_xvssran_wu_d(a: v4u64, b: v4u64) -> v8u32 { + unsafe { __lasx_xvssran_wu_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrarn_b_h(a: v16i16, b: v16i16) -> v32i8 { - __lasx_xvsrarn_b_h(a, b) +pub fn lasx_xvsrarn_b_h(a: v16i16, 
b: v16i16) -> v32i8 { + unsafe { __lasx_xvsrarn_b_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrarn_h_w(a: v8i32, b: v8i32) -> v16i16 { - __lasx_xvsrarn_h_w(a, b) +pub fn lasx_xvsrarn_h_w(a: v8i32, b: v8i32) -> v16i16 { + unsafe { __lasx_xvsrarn_h_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrarn_w_d(a: v4i64, b: v4i64) -> v8i32 { - __lasx_xvsrarn_w_d(a, b) +pub fn lasx_xvsrarn_w_d(a: v4i64, b: v4i64) -> v8i32 { + unsafe { __lasx_xvsrarn_w_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrarn_b_h(a: v16i16, b: v16i16) -> v32i8 { - __lasx_xvssrarn_b_h(a, b) +pub fn lasx_xvssrarn_b_h(a: v16i16, b: v16i16) -> v32i8 { + unsafe { __lasx_xvssrarn_b_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrarn_h_w(a: v8i32, b: v8i32) -> v16i16 { - __lasx_xvssrarn_h_w(a, b) +pub fn lasx_xvssrarn_h_w(a: v8i32, b: v8i32) -> v16i16 { + unsafe { __lasx_xvssrarn_h_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrarn_w_d(a: v4i64, b: v4i64) -> v8i32 { - __lasx_xvssrarn_w_d(a, b) +pub fn lasx_xvssrarn_w_d(a: v4i64, b: v4i64) -> v8i32 { + unsafe { __lasx_xvssrarn_w_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrarn_bu_h(a: v16u16, b: v16u16) -> v32u8 { - __lasx_xvssrarn_bu_h(a, b) +pub fn lasx_xvssrarn_bu_h(a: v16u16, b: v16u16) -> v32u8 { + unsafe { __lasx_xvssrarn_bu_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrarn_hu_w(a: v8u32, b: v8u32) -> v16u16 { - __lasx_xvssrarn_hu_w(a, b) +pub fn lasx_xvssrarn_hu_w(a: v8u32, b: v8u32) -> v16u16 { + unsafe { __lasx_xvssrarn_hu_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrarn_wu_d(a: v4u64, b: v4u64) -> v8u32 { - __lasx_xvssrarn_wu_d(a, b) +pub fn lasx_xvssrarn_wu_d(a: v4u64, b: v4u64) -> v8u32 { + unsafe { __lasx_xvssrarn_wu_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrln_b_h(a: v16i16, b: v16i16) -> v32i8 { - __lasx_xvsrln_b_h(a, b) +pub fn lasx_xvsrln_b_h(a: v16i16, b: v16i16) -> v32i8 { + unsafe { __lasx_xvsrln_b_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrln_h_w(a: v8i32, b: v8i32) -> v16i16 { - __lasx_xvsrln_h_w(a, b) +pub fn lasx_xvsrln_h_w(a: v8i32, b: v8i32) -> v16i16 { + unsafe { __lasx_xvsrln_h_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrln_w_d(a: v4i64, b: v4i64) -> v8i32 { - __lasx_xvsrln_w_d(a, b) +pub fn lasx_xvsrln_w_d(a: v4i64, b: v4i64) -> v8i32 { + unsafe { __lasx_xvsrln_w_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrln_bu_h(a: v16u16, b: v16u16) -> v32u8 { - __lasx_xvssrln_bu_h(a, b) +pub fn lasx_xvssrln_bu_h(a: v16u16, 
b: v16u16) -> v32u8 { + unsafe { __lasx_xvssrln_bu_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrln_hu_w(a: v8u32, b: v8u32) -> v16u16 { - __lasx_xvssrln_hu_w(a, b) +pub fn lasx_xvssrln_hu_w(a: v8u32, b: v8u32) -> v16u16 { + unsafe { __lasx_xvssrln_hu_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrln_wu_d(a: v4u64, b: v4u64) -> v8u32 { - __lasx_xvssrln_wu_d(a, b) +pub fn lasx_xvssrln_wu_d(a: v4u64, b: v4u64) -> v8u32 { + unsafe { __lasx_xvssrln_wu_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrlrn_b_h(a: v16i16, b: v16i16) -> v32i8 { - __lasx_xvsrlrn_b_h(a, b) +pub fn lasx_xvsrlrn_b_h(a: v16i16, b: v16i16) -> v32i8 { + unsafe { __lasx_xvsrlrn_b_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrlrn_h_w(a: v8i32, b: v8i32) -> v16i16 { - __lasx_xvsrlrn_h_w(a, b) +pub fn lasx_xvsrlrn_h_w(a: v8i32, b: v8i32) -> v16i16 { + unsafe { __lasx_xvsrlrn_h_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrlrn_w_d(a: v4i64, b: v4i64) -> v8i32 { - __lasx_xvsrlrn_w_d(a, b) +pub fn lasx_xvsrlrn_w_d(a: v4i64, b: v4i64) -> v8i32 { + unsafe { __lasx_xvsrlrn_w_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrlrn_bu_h(a: v16u16, b: v16u16) -> v32u8 { - __lasx_xvssrlrn_bu_h(a, b) +pub fn lasx_xvssrlrn_bu_h(a: v16u16, b: v16u16) -> v32u8 { + unsafe { __lasx_xvssrlrn_bu_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrlrn_hu_w(a: v8u32, b: v8u32) -> v16u16 { - __lasx_xvssrlrn_hu_w(a, b) +pub fn lasx_xvssrlrn_hu_w(a: v8u32, b: v8u32) -> v16u16 { + unsafe { __lasx_xvssrlrn_hu_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrlrn_wu_d(a: v4u64, b: v4u64) -> v8u32 { - __lasx_xvssrlrn_wu_d(a, b) +pub fn lasx_xvssrlrn_wu_d(a: v4u64, b: v4u64) -> v8u32 { + unsafe { __lasx_xvssrlrn_wu_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfrstpi_b(a: v32i8, b: v32i8) -> v32i8 { +pub fn lasx_xvfrstpi_b(a: v32i8, b: v32i8) -> v32i8 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvfrstpi_b(a, b, IMM5) + unsafe { __lasx_xvfrstpi_b(a, b, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfrstpi_h(a: v16i16, b: v16i16) -> v16i16 { +pub fn lasx_xvfrstpi_h(a: v16i16, b: v16i16) -> v16i16 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvfrstpi_h(a, b, IMM5) + unsafe { __lasx_xvfrstpi_h(a, b, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfrstp_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8 { - __lasx_xvfrstp_b(a, b, c) +pub fn lasx_xvfrstp_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8 { + unsafe { __lasx_xvfrstp_b(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfrstp_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16 { - __lasx_xvfrstp_h(a, b, c) +pub fn lasx_xvfrstp_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16 { + unsafe { __lasx_xvfrstp_h(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvshuf4i_d(a: v4i64, b: v4i64) -> v4i64 { +pub fn lasx_xvshuf4i_d(a: v4i64, b: v4i64) -> v4i64 { static_assert_uimm_bits!(IMM8, 8); - __lasx_xvshuf4i_d(a, b, IMM8) + unsafe { __lasx_xvshuf4i_d(a, b, IMM8) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvbsrl_v(a: v32i8) -> v32i8 { +pub fn lasx_xvbsrl_v(a: v32i8) -> v32i8 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvbsrl_v(a, IMM5) + unsafe { __lasx_xvbsrl_v(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvbsll_v(a: v32i8) -> v32i8 { +pub fn lasx_xvbsll_v(a: v32i8) -> v32i8 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvbsll_v(a, IMM5) + unsafe { __lasx_xvbsll_v(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvextrins_b(a: v32i8, b: v32i8) -> v32i8 { +pub fn lasx_xvextrins_b(a: v32i8, b: v32i8) -> v32i8 { static_assert_uimm_bits!(IMM8, 8); - __lasx_xvextrins_b(a, b, IMM8) + unsafe { __lasx_xvextrins_b(a, b, IMM8) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvextrins_h(a: v16i16, b: v16i16) -> v16i16 { +pub fn lasx_xvextrins_h(a: v16i16, b: v16i16) -> v16i16 { static_assert_uimm_bits!(IMM8, 8); - __lasx_xvextrins_h(a, b, IMM8) + unsafe { __lasx_xvextrins_h(a, b, IMM8) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvextrins_w(a: v8i32, b: v8i32) -> v8i32 { +pub fn lasx_xvextrins_w(a: v8i32, b: v8i32) -> v8i32 { static_assert_uimm_bits!(IMM8, 8); - __lasx_xvextrins_w(a, b, IMM8) + unsafe { __lasx_xvextrins_w(a, b, IMM8) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvextrins_d(a: v4i64, b: v4i64) -> v4i64 { +pub fn lasx_xvextrins_d(a: v4i64, b: v4i64) -> v4i64 { static_assert_uimm_bits!(IMM8, 8); - __lasx_xvextrins_d(a, b, IMM8) + unsafe { __lasx_xvextrins_d(a, b, IMM8) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmskltz_b(a: v32i8) -> v32i8 { - __lasx_xvmskltz_b(a) +pub fn lasx_xvmskltz_b(a: v32i8) -> v32i8 { + unsafe { __lasx_xvmskltz_b(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmskltz_h(a: v16i16) -> v16i16 { - __lasx_xvmskltz_h(a) +pub fn lasx_xvmskltz_h(a: v16i16) -> v16i16 { + unsafe { __lasx_xvmskltz_h(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmskltz_w(a: v8i32) -> v8i32 { - __lasx_xvmskltz_w(a) +pub fn lasx_xvmskltz_w(a: v8i32) 
-> v8i32 { + unsafe { __lasx_xvmskltz_w(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmskltz_d(a: v4i64) -> v4i64 { - __lasx_xvmskltz_d(a) +pub fn lasx_xvmskltz_d(a: v4i64) -> v4i64 { + unsafe { __lasx_xvmskltz_d(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsigncov_b(a: v32i8, b: v32i8) -> v32i8 { - __lasx_xvsigncov_b(a, b) +pub fn lasx_xvsigncov_b(a: v32i8, b: v32i8) -> v32i8 { + unsafe { __lasx_xvsigncov_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsigncov_h(a: v16i16, b: v16i16) -> v16i16 { - __lasx_xvsigncov_h(a, b) +pub fn lasx_xvsigncov_h(a: v16i16, b: v16i16) -> v16i16 { + unsafe { __lasx_xvsigncov_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsigncov_w(a: v8i32, b: v8i32) -> v8i32 { - __lasx_xvsigncov_w(a, b) +pub fn lasx_xvsigncov_w(a: v8i32, b: v8i32) -> v8i32 { + unsafe { __lasx_xvsigncov_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsigncov_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvsigncov_d(a, b) +pub fn lasx_xvsigncov_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvsigncov_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfmadd_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32 { - __lasx_xvfmadd_s(a, b, c) +pub fn lasx_xvfmadd_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32 { + unsafe { __lasx_xvfmadd_s(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfmadd_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64 { - __lasx_xvfmadd_d(a, b, c) +pub fn lasx_xvfmadd_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64 { + unsafe { __lasx_xvfmadd_d(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfmsub_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32 { - __lasx_xvfmsub_s(a, b, c) +pub fn lasx_xvfmsub_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32 { + unsafe { __lasx_xvfmsub_s(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfmsub_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64 { - __lasx_xvfmsub_d(a, b, c) +pub fn lasx_xvfmsub_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64 { + unsafe { __lasx_xvfmsub_d(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfnmadd_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32 { - __lasx_xvfnmadd_s(a, b, c) +pub fn lasx_xvfnmadd_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32 { + unsafe { __lasx_xvfnmadd_s(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfnmadd_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64 { - __lasx_xvfnmadd_d(a, b, c) +pub fn lasx_xvfnmadd_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64 { + unsafe { __lasx_xvfnmadd_d(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfnmsub_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32 { - __lasx_xvfnmsub_s(a, b, 
c) +pub fn lasx_xvfnmsub_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32 { + unsafe { __lasx_xvfnmsub_s(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfnmsub_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64 { - __lasx_xvfnmsub_d(a, b, c) +pub fn lasx_xvfnmsub_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64 { + unsafe { __lasx_xvfnmsub_d(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvftintrne_w_s(a: v8f32) -> v8i32 { - __lasx_xvftintrne_w_s(a) +pub fn lasx_xvftintrne_w_s(a: v8f32) -> v8i32 { + unsafe { __lasx_xvftintrne_w_s(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvftintrne_l_d(a: v4f64) -> v4i64 { - __lasx_xvftintrne_l_d(a) +pub fn lasx_xvftintrne_l_d(a: v4f64) -> v4i64 { + unsafe { __lasx_xvftintrne_l_d(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvftintrp_w_s(a: v8f32) -> v8i32 { - __lasx_xvftintrp_w_s(a) +pub fn lasx_xvftintrp_w_s(a: v8f32) -> v8i32 { + unsafe { __lasx_xvftintrp_w_s(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvftintrp_l_d(a: v4f64) -> v4i64 { - __lasx_xvftintrp_l_d(a) +pub fn lasx_xvftintrp_l_d(a: v4f64) -> v4i64 { + unsafe { __lasx_xvftintrp_l_d(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvftintrm_w_s(a: v8f32) -> v8i32 { - __lasx_xvftintrm_w_s(a) +pub fn lasx_xvftintrm_w_s(a: v8f32) -> v8i32 { + unsafe { __lasx_xvftintrm_w_s(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvftintrm_l_d(a: v4f64) -> v4i64 { - __lasx_xvftintrm_l_d(a) +pub fn lasx_xvftintrm_l_d(a: v4f64) -> v4i64 { + unsafe { __lasx_xvftintrm_l_d(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvftint_w_d(a: v4f64, b: v4f64) -> v8i32 { - __lasx_xvftint_w_d(a, b) +pub fn lasx_xvftint_w_d(a: v4f64, b: v4f64) -> v8i32 { + unsafe { __lasx_xvftint_w_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvffint_s_l(a: v4i64, b: v4i64) -> v8f32 { - __lasx_xvffint_s_l(a, b) +pub fn lasx_xvffint_s_l(a: v4i64, b: v4i64) -> v8f32 { + unsafe { __lasx_xvffint_s_l(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvftintrz_w_d(a: v4f64, b: v4f64) -> v8i32 { - __lasx_xvftintrz_w_d(a, b) +pub fn lasx_xvftintrz_w_d(a: v4f64, b: v4f64) -> v8i32 { + unsafe { __lasx_xvftintrz_w_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvftintrp_w_d(a: v4f64, b: v4f64) -> v8i32 { - __lasx_xvftintrp_w_d(a, b) +pub fn lasx_xvftintrp_w_d(a: v4f64, b: v4f64) -> v8i32 { + unsafe { __lasx_xvftintrp_w_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvftintrm_w_d(a: v4f64, b: v4f64) -> v8i32 { - __lasx_xvftintrm_w_d(a, b) +pub fn lasx_xvftintrm_w_d(a: v4f64, b: v4f64) -> v8i32 { + unsafe { __lasx_xvftintrm_w_d(a, 
b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvftintrne_w_d(a: v4f64, b: v4f64) -> v8i32 { - __lasx_xvftintrne_w_d(a, b) +pub fn lasx_xvftintrne_w_d(a: v4f64, b: v4f64) -> v8i32 { + unsafe { __lasx_xvftintrne_w_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvftinth_l_s(a: v8f32) -> v4i64 { - __lasx_xvftinth_l_s(a) +pub fn lasx_xvftinth_l_s(a: v8f32) -> v4i64 { + unsafe { __lasx_xvftinth_l_s(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvftintl_l_s(a: v8f32) -> v4i64 { - __lasx_xvftintl_l_s(a) +pub fn lasx_xvftintl_l_s(a: v8f32) -> v4i64 { + unsafe { __lasx_xvftintl_l_s(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvffinth_d_w(a: v8i32) -> v4f64 { - __lasx_xvffinth_d_w(a) +pub fn lasx_xvffinth_d_w(a: v8i32) -> v4f64 { + unsafe { __lasx_xvffinth_d_w(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvffintl_d_w(a: v8i32) -> v4f64 { - __lasx_xvffintl_d_w(a) +pub fn lasx_xvffintl_d_w(a: v8i32) -> v4f64 { + unsafe { __lasx_xvffintl_d_w(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvftintrzh_l_s(a: v8f32) -> v4i64 { - __lasx_xvftintrzh_l_s(a) +pub fn lasx_xvftintrzh_l_s(a: v8f32) -> v4i64 { + unsafe { __lasx_xvftintrzh_l_s(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvftintrzl_l_s(a: v8f32) -> v4i64 { - __lasx_xvftintrzl_l_s(a) +pub fn lasx_xvftintrzl_l_s(a: v8f32) -> v4i64 { + unsafe { __lasx_xvftintrzl_l_s(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvftintrph_l_s(a: v8f32) -> v4i64 { - __lasx_xvftintrph_l_s(a) +pub fn lasx_xvftintrph_l_s(a: v8f32) -> v4i64 { + unsafe { __lasx_xvftintrph_l_s(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvftintrpl_l_s(a: v8f32) -> v4i64 { - __lasx_xvftintrpl_l_s(a) +pub fn lasx_xvftintrpl_l_s(a: v8f32) -> v4i64 { + unsafe { __lasx_xvftintrpl_l_s(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvftintrmh_l_s(a: v8f32) -> v4i64 { - __lasx_xvftintrmh_l_s(a) +pub fn lasx_xvftintrmh_l_s(a: v8f32) -> v4i64 { + unsafe { __lasx_xvftintrmh_l_s(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvftintrml_l_s(a: v8f32) -> v4i64 { - __lasx_xvftintrml_l_s(a) +pub fn lasx_xvftintrml_l_s(a: v8f32) -> v4i64 { + unsafe { __lasx_xvftintrml_l_s(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvftintrneh_l_s(a: v8f32) -> v4i64 { - __lasx_xvftintrneh_l_s(a) +pub fn lasx_xvftintrneh_l_s(a: v8f32) -> v4i64 { + unsafe { __lasx_xvftintrneh_l_s(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvftintrnel_l_s(a: v8f32) -> v4i64 { - __lasx_xvftintrnel_l_s(a) +pub fn 
lasx_xvftintrnel_l_s(a: v8f32) -> v4i64 { + unsafe { __lasx_xvftintrnel_l_s(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfrintrne_s(a: v8f32) -> v8f32 { - __lasx_xvfrintrne_s(a) +pub fn lasx_xvfrintrne_s(a: v8f32) -> v8f32 { + unsafe { __lasx_xvfrintrne_s(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfrintrne_d(a: v4f64) -> v4f64 { - __lasx_xvfrintrne_d(a) +pub fn lasx_xvfrintrne_d(a: v4f64) -> v4f64 { + unsafe { __lasx_xvfrintrne_d(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfrintrz_s(a: v8f32) -> v8f32 { - __lasx_xvfrintrz_s(a) +pub fn lasx_xvfrintrz_s(a: v8f32) -> v8f32 { + unsafe { __lasx_xvfrintrz_s(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfrintrz_d(a: v4f64) -> v4f64 { - __lasx_xvfrintrz_d(a) +pub fn lasx_xvfrintrz_d(a: v4f64) -> v4f64 { + unsafe { __lasx_xvfrintrz_d(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfrintrp_s(a: v8f32) -> v8f32 { - __lasx_xvfrintrp_s(a) +pub fn lasx_xvfrintrp_s(a: v8f32) -> v8f32 { + unsafe { __lasx_xvfrintrp_s(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfrintrp_d(a: v4f64) -> v4f64 { - __lasx_xvfrintrp_d(a) +pub fn lasx_xvfrintrp_d(a: v4f64) -> v4f64 { + unsafe { __lasx_xvfrintrp_d(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfrintrm_s(a: v8f32) -> v8f32 { - __lasx_xvfrintrm_s(a) +pub fn lasx_xvfrintrm_s(a: v8f32) -> v8f32 { + unsafe { __lasx_xvfrintrm_s(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfrintrm_d(a: v4f64) -> v4f64 { - __lasx_xvfrintrm_d(a) +pub fn lasx_xvfrintrm_d(a: v4f64) -> v4f64 { + unsafe { __lasx_xvfrintrm_d(a) } } #[inline] @@ -5054,94 +5054,94 @@ pub unsafe fn lasx_xvstelm_d(a: v4i64, mem_a #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvinsve0_w(a: v8i32, b: v8i32) -> v8i32 { +pub fn lasx_xvinsve0_w(a: v8i32, b: v8i32) -> v8i32 { static_assert_uimm_bits!(IMM3, 3); - __lasx_xvinsve0_w(a, b, IMM3) + unsafe { __lasx_xvinsve0_w(a, b, IMM3) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvinsve0_d(a: v4i64, b: v4i64) -> v4i64 { +pub fn lasx_xvinsve0_d(a: v4i64, b: v4i64) -> v4i64 { static_assert_uimm_bits!(IMM2, 2); - __lasx_xvinsve0_d(a, b, IMM2) + unsafe { __lasx_xvinsve0_d(a, b, IMM2) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvpickve_w(a: v8i32) -> v8i32 { +pub fn lasx_xvpickve_w(a: v8i32) -> v8i32 { static_assert_uimm_bits!(IMM3, 3); - __lasx_xvpickve_w(a, IMM3) + unsafe { __lasx_xvpickve_w(a, IMM3) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvpickve_d(a: v4i64) -> 
v4i64 { +pub fn lasx_xvpickve_d(a: v4i64) -> v4i64 { static_assert_uimm_bits!(IMM2, 2); - __lasx_xvpickve_d(a, IMM2) + unsafe { __lasx_xvpickve_d(a, IMM2) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrlrn_b_h(a: v16i16, b: v16i16) -> v32i8 { - __lasx_xvssrlrn_b_h(a, b) +pub fn lasx_xvssrlrn_b_h(a: v16i16, b: v16i16) -> v32i8 { + unsafe { __lasx_xvssrlrn_b_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrlrn_h_w(a: v8i32, b: v8i32) -> v16i16 { - __lasx_xvssrlrn_h_w(a, b) +pub fn lasx_xvssrlrn_h_w(a: v8i32, b: v8i32) -> v16i16 { + unsafe { __lasx_xvssrlrn_h_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrlrn_w_d(a: v4i64, b: v4i64) -> v8i32 { - __lasx_xvssrlrn_w_d(a, b) +pub fn lasx_xvssrlrn_w_d(a: v4i64, b: v4i64) -> v8i32 { + unsafe { __lasx_xvssrlrn_w_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrln_b_h(a: v16i16, b: v16i16) -> v32i8 { - __lasx_xvssrln_b_h(a, b) +pub fn lasx_xvssrln_b_h(a: v16i16, b: v16i16) -> v32i8 { + unsafe { __lasx_xvssrln_b_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrln_h_w(a: v8i32, b: v8i32) -> v16i16 { - __lasx_xvssrln_h_w(a, b) +pub fn lasx_xvssrln_h_w(a: v8i32, b: v8i32) -> v16i16 { + unsafe { __lasx_xvssrln_h_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrln_w_d(a: v4i64, b: v4i64) -> v8i32 { - __lasx_xvssrln_w_d(a, b) +pub fn lasx_xvssrln_w_d(a: v4i64, b: v4i64) -> v8i32 { + unsafe { __lasx_xvssrln_w_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvorn_v(a: v32i8, b: v32i8) -> v32i8 { - __lasx_xvorn_v(a, b) +pub fn lasx_xvorn_v(a: v32i8, b: v32i8) -> v32i8 { + unsafe { __lasx_xvorn_v(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(0)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvldi() -> v4i64 { +pub fn lasx_xvldi() -> v4i64 { static_assert_simm_bits!(IMM_S13, 13); - __lasx_xvldi(IMM_S13) + unsafe { __lasx_xvldi(IMM_S13) } } #[inline] @@ -5161,170 +5161,170 @@ pub unsafe fn lasx_xvstx(a: v32i8, mem_addr: *mut i8, b: i64) { #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvextl_qu_du(a: v4u64) -> v4u64 { - __lasx_xvextl_qu_du(a) +pub fn lasx_xvextl_qu_du(a: v4u64) -> v4u64 { + unsafe { __lasx_xvextl_qu_du(a) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvinsgr2vr_w(a: v8i32, b: i32) -> v8i32 { +pub fn lasx_xvinsgr2vr_w(a: v8i32, b: i32) -> v8i32 { static_assert_uimm_bits!(IMM3, 3); - __lasx_xvinsgr2vr_w(a, b, IMM3) + unsafe { __lasx_xvinsgr2vr_w(a, b, IMM3) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvinsgr2vr_d(a: v4i64, b: i64) -> v4i64 { +pub fn lasx_xvinsgr2vr_d(a: v4i64, b: i64) -> v4i64 { static_assert_uimm_bits!(IMM2, 
2); - __lasx_xvinsgr2vr_d(a, b, IMM2) + unsafe { __lasx_xvinsgr2vr_d(a, b, IMM2) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvreplve0_b(a: v32i8) -> v32i8 { - __lasx_xvreplve0_b(a) +pub fn lasx_xvreplve0_b(a: v32i8) -> v32i8 { + unsafe { __lasx_xvreplve0_b(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvreplve0_h(a: v16i16) -> v16i16 { - __lasx_xvreplve0_h(a) +pub fn lasx_xvreplve0_h(a: v16i16) -> v16i16 { + unsafe { __lasx_xvreplve0_h(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvreplve0_w(a: v8i32) -> v8i32 { - __lasx_xvreplve0_w(a) +pub fn lasx_xvreplve0_w(a: v8i32) -> v8i32 { + unsafe { __lasx_xvreplve0_w(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvreplve0_d(a: v4i64) -> v4i64 { - __lasx_xvreplve0_d(a) +pub fn lasx_xvreplve0_d(a: v4i64) -> v4i64 { + unsafe { __lasx_xvreplve0_d(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvreplve0_q(a: v32i8) -> v32i8 { - __lasx_xvreplve0_q(a) +pub fn lasx_xvreplve0_q(a: v32i8) -> v32i8 { + unsafe { __lasx_xvreplve0_q(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_vext2xv_h_b(a: v32i8) -> v16i16 { - __lasx_vext2xv_h_b(a) +pub fn lasx_vext2xv_h_b(a: v32i8) -> v16i16 { + unsafe { __lasx_vext2xv_h_b(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_vext2xv_w_h(a: v16i16) -> v8i32 { - __lasx_vext2xv_w_h(a) +pub fn lasx_vext2xv_w_h(a: v16i16) -> v8i32 { + unsafe { __lasx_vext2xv_w_h(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_vext2xv_d_w(a: v8i32) -> v4i64 { - __lasx_vext2xv_d_w(a) +pub fn lasx_vext2xv_d_w(a: v8i32) -> v4i64 { + unsafe { __lasx_vext2xv_d_w(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_vext2xv_w_b(a: v32i8) -> v8i32 { - __lasx_vext2xv_w_b(a) +pub fn lasx_vext2xv_w_b(a: v32i8) -> v8i32 { + unsafe { __lasx_vext2xv_w_b(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_vext2xv_d_h(a: v16i16) -> v4i64 { - __lasx_vext2xv_d_h(a) +pub fn lasx_vext2xv_d_h(a: v16i16) -> v4i64 { + unsafe { __lasx_vext2xv_d_h(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_vext2xv_d_b(a: v32i8) -> v4i64 { - __lasx_vext2xv_d_b(a) +pub fn lasx_vext2xv_d_b(a: v32i8) -> v4i64 { + unsafe { __lasx_vext2xv_d_b(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_vext2xv_hu_bu(a: v32i8) -> v16i16 { - __lasx_vext2xv_hu_bu(a) +pub fn lasx_vext2xv_hu_bu(a: v32i8) -> v16i16 { + unsafe { __lasx_vext2xv_hu_bu(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_vext2xv_wu_hu(a: v16i16) -> v8i32 { - __lasx_vext2xv_wu_hu(a) +pub fn lasx_vext2xv_wu_hu(a: v16i16) -> v8i32 { + unsafe { 
__lasx_vext2xv_wu_hu(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_vext2xv_du_wu(a: v8i32) -> v4i64 { - __lasx_vext2xv_du_wu(a) +pub fn lasx_vext2xv_du_wu(a: v8i32) -> v4i64 { + unsafe { __lasx_vext2xv_du_wu(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_vext2xv_wu_bu(a: v32i8) -> v8i32 { - __lasx_vext2xv_wu_bu(a) +pub fn lasx_vext2xv_wu_bu(a: v32i8) -> v8i32 { + unsafe { __lasx_vext2xv_wu_bu(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_vext2xv_du_hu(a: v16i16) -> v4i64 { - __lasx_vext2xv_du_hu(a) +pub fn lasx_vext2xv_du_hu(a: v16i16) -> v4i64 { + unsafe { __lasx_vext2xv_du_hu(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_vext2xv_du_bu(a: v32i8) -> v4i64 { - __lasx_vext2xv_du_bu(a) +pub fn lasx_vext2xv_du_bu(a: v32i8) -> v4i64 { + unsafe { __lasx_vext2xv_du_bu(a) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvpermi_q(a: v32i8, b: v32i8) -> v32i8 { +pub fn lasx_xvpermi_q(a: v32i8, b: v32i8) -> v32i8 { static_assert_uimm_bits!(IMM8, 8); - __lasx_xvpermi_q(a, b, IMM8) + unsafe { __lasx_xvpermi_q(a, b, IMM8) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvpermi_d(a: v4i64) -> v4i64 { +pub fn lasx_xvpermi_d(a: v4i64) -> v4i64 { static_assert_uimm_bits!(IMM8, 8); - __lasx_xvpermi_d(a, IMM8) + unsafe { __lasx_xvpermi_d(a, IMM8) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvperm_w(a: v8i32, b: v8i32) -> v8i32 { - __lasx_xvperm_w(a, b) +pub fn lasx_xvperm_w(a: v8i32, b: v8i32) -> v8i32 { + unsafe { __lasx_xvperm_w(a, b) } } #[inline] @@ -5367,1697 +5367,1697 @@ pub unsafe fn lasx_xvldrepl_d(mem_addr: *const i8) -> v4i64 { #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvpickve2gr_w(a: v8i32) -> i32 { +pub fn lasx_xvpickve2gr_w(a: v8i32) -> i32 { static_assert_uimm_bits!(IMM3, 3); - __lasx_xvpickve2gr_w(a, IMM3) + unsafe { __lasx_xvpickve2gr_w(a, IMM3) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvpickve2gr_wu(a: v8i32) -> u32 { +pub fn lasx_xvpickve2gr_wu(a: v8i32) -> u32 { static_assert_uimm_bits!(IMM3, 3); - __lasx_xvpickve2gr_wu(a, IMM3) + unsafe { __lasx_xvpickve2gr_wu(a, IMM3) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvpickve2gr_d(a: v4i64) -> i64 { +pub fn lasx_xvpickve2gr_d(a: v4i64) -> i64 { static_assert_uimm_bits!(IMM2, 2); - __lasx_xvpickve2gr_d(a, IMM2) + unsafe { __lasx_xvpickve2gr_d(a, IMM2) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvpickve2gr_du(a: v4i64) -> u64 { +pub fn lasx_xvpickve2gr_du(a: v4i64) -> u64 { static_assert_uimm_bits!(IMM2, 2); - 
__lasx_xvpickve2gr_du(a, IMM2) + unsafe { __lasx_xvpickve2gr_du(a, IMM2) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvaddwev_q_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvaddwev_q_d(a, b) +pub fn lasx_xvaddwev_q_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvaddwev_q_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvaddwev_d_w(a: v8i32, b: v8i32) -> v4i64 { - __lasx_xvaddwev_d_w(a, b) +pub fn lasx_xvaddwev_d_w(a: v8i32, b: v8i32) -> v4i64 { + unsafe { __lasx_xvaddwev_d_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvaddwev_w_h(a: v16i16, b: v16i16) -> v8i32 { - __lasx_xvaddwev_w_h(a, b) +pub fn lasx_xvaddwev_w_h(a: v16i16, b: v16i16) -> v8i32 { + unsafe { __lasx_xvaddwev_w_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvaddwev_h_b(a: v32i8, b: v32i8) -> v16i16 { - __lasx_xvaddwev_h_b(a, b) +pub fn lasx_xvaddwev_h_b(a: v32i8, b: v32i8) -> v16i16 { + unsafe { __lasx_xvaddwev_h_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvaddwev_q_du(a: v4u64, b: v4u64) -> v4i64 { - __lasx_xvaddwev_q_du(a, b) +pub fn lasx_xvaddwev_q_du(a: v4u64, b: v4u64) -> v4i64 { + unsafe { __lasx_xvaddwev_q_du(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvaddwev_d_wu(a: v8u32, b: v8u32) -> v4i64 { - __lasx_xvaddwev_d_wu(a, b) +pub fn lasx_xvaddwev_d_wu(a: v8u32, b: v8u32) -> v4i64 { + unsafe { __lasx_xvaddwev_d_wu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvaddwev_w_hu(a: v16u16, b: v16u16) -> v8i32 { - __lasx_xvaddwev_w_hu(a, b) +pub fn lasx_xvaddwev_w_hu(a: v16u16, b: v16u16) -> v8i32 { + unsafe { __lasx_xvaddwev_w_hu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvaddwev_h_bu(a: v32u8, b: v32u8) -> v16i16 { - __lasx_xvaddwev_h_bu(a, b) +pub fn lasx_xvaddwev_h_bu(a: v32u8, b: v32u8) -> v16i16 { + unsafe { __lasx_xvaddwev_h_bu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsubwev_q_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvsubwev_q_d(a, b) +pub fn lasx_xvsubwev_q_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvsubwev_q_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsubwev_d_w(a: v8i32, b: v8i32) -> v4i64 { - __lasx_xvsubwev_d_w(a, b) +pub fn lasx_xvsubwev_d_w(a: v8i32, b: v8i32) -> v4i64 { + unsafe { __lasx_xvsubwev_d_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsubwev_w_h(a: v16i16, b: v16i16) -> v8i32 { - __lasx_xvsubwev_w_h(a, b) +pub fn lasx_xvsubwev_w_h(a: v16i16, b: v16i16) -> v8i32 { + unsafe { __lasx_xvsubwev_w_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsubwev_h_b(a: v32i8, b: v32i8) -> v16i16 { - 
__lasx_xvsubwev_h_b(a, b) +pub fn lasx_xvsubwev_h_b(a: v32i8, b: v32i8) -> v16i16 { + unsafe { __lasx_xvsubwev_h_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsubwev_q_du(a: v4u64, b: v4u64) -> v4i64 { - __lasx_xvsubwev_q_du(a, b) +pub fn lasx_xvsubwev_q_du(a: v4u64, b: v4u64) -> v4i64 { + unsafe { __lasx_xvsubwev_q_du(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsubwev_d_wu(a: v8u32, b: v8u32) -> v4i64 { - __lasx_xvsubwev_d_wu(a, b) +pub fn lasx_xvsubwev_d_wu(a: v8u32, b: v8u32) -> v4i64 { + unsafe { __lasx_xvsubwev_d_wu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsubwev_w_hu(a: v16u16, b: v16u16) -> v8i32 { - __lasx_xvsubwev_w_hu(a, b) +pub fn lasx_xvsubwev_w_hu(a: v16u16, b: v16u16) -> v8i32 { + unsafe { __lasx_xvsubwev_w_hu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsubwev_h_bu(a: v32u8, b: v32u8) -> v16i16 { - __lasx_xvsubwev_h_bu(a, b) +pub fn lasx_xvsubwev_h_bu(a: v32u8, b: v32u8) -> v16i16 { + unsafe { __lasx_xvsubwev_h_bu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmulwev_q_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvmulwev_q_d(a, b) +pub fn lasx_xvmulwev_q_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvmulwev_q_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmulwev_d_w(a: v8i32, b: v8i32) -> v4i64 { - __lasx_xvmulwev_d_w(a, b) +pub fn lasx_xvmulwev_d_w(a: v8i32, b: v8i32) -> v4i64 { + unsafe { __lasx_xvmulwev_d_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmulwev_w_h(a: v16i16, b: v16i16) -> v8i32 { - __lasx_xvmulwev_w_h(a, b) +pub fn lasx_xvmulwev_w_h(a: v16i16, b: v16i16) -> v8i32 { + unsafe { __lasx_xvmulwev_w_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmulwev_h_b(a: v32i8, b: v32i8) -> v16i16 { - __lasx_xvmulwev_h_b(a, b) +pub fn lasx_xvmulwev_h_b(a: v32i8, b: v32i8) -> v16i16 { + unsafe { __lasx_xvmulwev_h_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmulwev_q_du(a: v4u64, b: v4u64) -> v4i64 { - __lasx_xvmulwev_q_du(a, b) +pub fn lasx_xvmulwev_q_du(a: v4u64, b: v4u64) -> v4i64 { + unsafe { __lasx_xvmulwev_q_du(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmulwev_d_wu(a: v8u32, b: v8u32) -> v4i64 { - __lasx_xvmulwev_d_wu(a, b) +pub fn lasx_xvmulwev_d_wu(a: v8u32, b: v8u32) -> v4i64 { + unsafe { __lasx_xvmulwev_d_wu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmulwev_w_hu(a: v16u16, b: v16u16) -> v8i32 { - __lasx_xvmulwev_w_hu(a, b) +pub fn lasx_xvmulwev_w_hu(a: v16u16, b: v16u16) -> v8i32 { + unsafe { __lasx_xvmulwev_w_hu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn 
lasx_xvmulwev_h_bu(a: v32u8, b: v32u8) -> v16i16 { - __lasx_xvmulwev_h_bu(a, b) +pub fn lasx_xvmulwev_h_bu(a: v32u8, b: v32u8) -> v16i16 { + unsafe { __lasx_xvmulwev_h_bu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvaddwod_q_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvaddwod_q_d(a, b) +pub fn lasx_xvaddwod_q_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvaddwod_q_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvaddwod_d_w(a: v8i32, b: v8i32) -> v4i64 { - __lasx_xvaddwod_d_w(a, b) +pub fn lasx_xvaddwod_d_w(a: v8i32, b: v8i32) -> v4i64 { + unsafe { __lasx_xvaddwod_d_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvaddwod_w_h(a: v16i16, b: v16i16) -> v8i32 { - __lasx_xvaddwod_w_h(a, b) +pub fn lasx_xvaddwod_w_h(a: v16i16, b: v16i16) -> v8i32 { + unsafe { __lasx_xvaddwod_w_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvaddwod_h_b(a: v32i8, b: v32i8) -> v16i16 { - __lasx_xvaddwod_h_b(a, b) +pub fn lasx_xvaddwod_h_b(a: v32i8, b: v32i8) -> v16i16 { + unsafe { __lasx_xvaddwod_h_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvaddwod_q_du(a: v4u64, b: v4u64) -> v4i64 { - __lasx_xvaddwod_q_du(a, b) +pub fn lasx_xvaddwod_q_du(a: v4u64, b: v4u64) -> v4i64 { + unsafe { __lasx_xvaddwod_q_du(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvaddwod_d_wu(a: v8u32, b: v8u32) -> v4i64 { - __lasx_xvaddwod_d_wu(a, b) +pub fn lasx_xvaddwod_d_wu(a: v8u32, b: v8u32) -> v4i64 { + unsafe { __lasx_xvaddwod_d_wu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvaddwod_w_hu(a: v16u16, b: v16u16) -> v8i32 { - __lasx_xvaddwod_w_hu(a, b) +pub fn lasx_xvaddwod_w_hu(a: v16u16, b: v16u16) -> v8i32 { + unsafe { __lasx_xvaddwod_w_hu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvaddwod_h_bu(a: v32u8, b: v32u8) -> v16i16 { - __lasx_xvaddwod_h_bu(a, b) +pub fn lasx_xvaddwod_h_bu(a: v32u8, b: v32u8) -> v16i16 { + unsafe { __lasx_xvaddwod_h_bu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsubwod_q_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvsubwod_q_d(a, b) +pub fn lasx_xvsubwod_q_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvsubwod_q_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsubwod_d_w(a: v8i32, b: v8i32) -> v4i64 { - __lasx_xvsubwod_d_w(a, b) +pub fn lasx_xvsubwod_d_w(a: v8i32, b: v8i32) -> v4i64 { + unsafe { __lasx_xvsubwod_d_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsubwod_w_h(a: v16i16, b: v16i16) -> v8i32 { - __lasx_xvsubwod_w_h(a, b) +pub fn lasx_xvsubwod_w_h(a: v16i16, b: v16i16) -> v8i32 { + unsafe { __lasx_xvsubwod_w_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsubwod_h_b(a: v32i8, b: v32i8) -> v16i16 { - __lasx_xvsubwod_h_b(a, b) +pub fn lasx_xvsubwod_h_b(a: v32i8, b: v32i8) -> v16i16 { + unsafe { __lasx_xvsubwod_h_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsubwod_q_du(a: v4u64, b: v4u64) -> v4i64 { - __lasx_xvsubwod_q_du(a, b) +pub fn lasx_xvsubwod_q_du(a: v4u64, b: v4u64) -> v4i64 { + unsafe { __lasx_xvsubwod_q_du(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsubwod_d_wu(a: v8u32, b: v8u32) -> v4i64 { - __lasx_xvsubwod_d_wu(a, b) +pub fn lasx_xvsubwod_d_wu(a: v8u32, b: v8u32) -> v4i64 { + unsafe { __lasx_xvsubwod_d_wu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsubwod_w_hu(a: v16u16, b: v16u16) -> v8i32 { - __lasx_xvsubwod_w_hu(a, b) +pub fn lasx_xvsubwod_w_hu(a: v16u16, b: v16u16) -> v8i32 { + unsafe { __lasx_xvsubwod_w_hu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsubwod_h_bu(a: v32u8, b: v32u8) -> v16i16 { - __lasx_xvsubwod_h_bu(a, b) +pub fn lasx_xvsubwod_h_bu(a: v32u8, b: v32u8) -> v16i16 { + unsafe { __lasx_xvsubwod_h_bu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmulwod_q_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvmulwod_q_d(a, b) +pub fn lasx_xvmulwod_q_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvmulwod_q_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmulwod_d_w(a: v8i32, b: v8i32) -> v4i64 { - __lasx_xvmulwod_d_w(a, b) +pub fn lasx_xvmulwod_d_w(a: v8i32, b: v8i32) -> v4i64 { + unsafe { __lasx_xvmulwod_d_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmulwod_w_h(a: v16i16, b: v16i16) -> v8i32 { - __lasx_xvmulwod_w_h(a, b) +pub fn lasx_xvmulwod_w_h(a: v16i16, b: v16i16) -> v8i32 { + unsafe { __lasx_xvmulwod_w_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmulwod_h_b(a: v32i8, b: v32i8) -> v16i16 { - __lasx_xvmulwod_h_b(a, b) +pub fn lasx_xvmulwod_h_b(a: v32i8, b: v32i8) -> v16i16 { + unsafe { __lasx_xvmulwod_h_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmulwod_q_du(a: v4u64, b: v4u64) -> v4i64 { - __lasx_xvmulwod_q_du(a, b) +pub fn lasx_xvmulwod_q_du(a: v4u64, b: v4u64) -> v4i64 { + unsafe { __lasx_xvmulwod_q_du(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmulwod_d_wu(a: v8u32, b: v8u32) -> v4i64 { - __lasx_xvmulwod_d_wu(a, b) +pub fn lasx_xvmulwod_d_wu(a: v8u32, b: v8u32) -> v4i64 { + unsafe { __lasx_xvmulwod_d_wu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmulwod_w_hu(a: v16u16, b: v16u16) -> v8i32 { - __lasx_xvmulwod_w_hu(a, b) +pub fn lasx_xvmulwod_w_hu(a: v16u16, b: v16u16) -> v8i32 { + unsafe { __lasx_xvmulwod_w_hu(a, b) } } #[inline] 
#[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmulwod_h_bu(a: v32u8, b: v32u8) -> v16i16 { - __lasx_xvmulwod_h_bu(a, b) +pub fn lasx_xvmulwod_h_bu(a: v32u8, b: v32u8) -> v16i16 { + unsafe { __lasx_xvmulwod_h_bu(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvaddwev_d_wu_w(a: v8u32, b: v8i32) -> v4i64 { - __lasx_xvaddwev_d_wu_w(a, b) +pub fn lasx_xvaddwev_d_wu_w(a: v8u32, b: v8i32) -> v4i64 { + unsafe { __lasx_xvaddwev_d_wu_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvaddwev_w_hu_h(a: v16u16, b: v16i16) -> v8i32 { - __lasx_xvaddwev_w_hu_h(a, b) +pub fn lasx_xvaddwev_w_hu_h(a: v16u16, b: v16i16) -> v8i32 { + unsafe { __lasx_xvaddwev_w_hu_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvaddwev_h_bu_b(a: v32u8, b: v32i8) -> v16i16 { - __lasx_xvaddwev_h_bu_b(a, b) +pub fn lasx_xvaddwev_h_bu_b(a: v32u8, b: v32i8) -> v16i16 { + unsafe { __lasx_xvaddwev_h_bu_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmulwev_d_wu_w(a: v8u32, b: v8i32) -> v4i64 { - __lasx_xvmulwev_d_wu_w(a, b) +pub fn lasx_xvmulwev_d_wu_w(a: v8u32, b: v8i32) -> v4i64 { + unsafe { __lasx_xvmulwev_d_wu_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmulwev_w_hu_h(a: v16u16, b: v16i16) -> v8i32 { - __lasx_xvmulwev_w_hu_h(a, b) +pub fn lasx_xvmulwev_w_hu_h(a: v16u16, b: v16i16) -> v8i32 { + unsafe { __lasx_xvmulwev_w_hu_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmulwev_h_bu_b(a: v32u8, b: v32i8) -> v16i16 { - __lasx_xvmulwev_h_bu_b(a, b) +pub fn lasx_xvmulwev_h_bu_b(a: v32u8, b: v32i8) -> v16i16 { + unsafe { __lasx_xvmulwev_h_bu_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvaddwod_d_wu_w(a: v8u32, b: v8i32) -> v4i64 { - __lasx_xvaddwod_d_wu_w(a, b) +pub fn lasx_xvaddwod_d_wu_w(a: v8u32, b: v8i32) -> v4i64 { + unsafe { __lasx_xvaddwod_d_wu_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvaddwod_w_hu_h(a: v16u16, b: v16i16) -> v8i32 { - __lasx_xvaddwod_w_hu_h(a, b) +pub fn lasx_xvaddwod_w_hu_h(a: v16u16, b: v16i16) -> v8i32 { + unsafe { __lasx_xvaddwod_w_hu_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvaddwod_h_bu_b(a: v32u8, b: v32i8) -> v16i16 { - __lasx_xvaddwod_h_bu_b(a, b) +pub fn lasx_xvaddwod_h_bu_b(a: v32u8, b: v32i8) -> v16i16 { + unsafe { __lasx_xvaddwod_h_bu_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmulwod_d_wu_w(a: v8u32, b: v8i32) -> v4i64 { - __lasx_xvmulwod_d_wu_w(a, b) +pub fn lasx_xvmulwod_d_wu_w(a: v8u32, b: v8i32) -> v4i64 { + unsafe { __lasx_xvmulwod_d_wu_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmulwod_w_hu_h(a: v16u16, b: v16i16) 
-> v8i32 { - __lasx_xvmulwod_w_hu_h(a, b) +pub fn lasx_xvmulwod_w_hu_h(a: v16u16, b: v16i16) -> v8i32 { + unsafe { __lasx_xvmulwod_w_hu_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmulwod_h_bu_b(a: v32u8, b: v32i8) -> v16i16 { - __lasx_xvmulwod_h_bu_b(a, b) +pub fn lasx_xvmulwod_h_bu_b(a: v32u8, b: v32i8) -> v16i16 { + unsafe { __lasx_xvmulwod_h_bu_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvhaddw_q_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvhaddw_q_d(a, b) +pub fn lasx_xvhaddw_q_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvhaddw_q_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvhaddw_qu_du(a: v4u64, b: v4u64) -> v4u64 { - __lasx_xvhaddw_qu_du(a, b) +pub fn lasx_xvhaddw_qu_du(a: v4u64, b: v4u64) -> v4u64 { + unsafe { __lasx_xvhaddw_qu_du(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvhsubw_q_d(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvhsubw_q_d(a, b) +pub fn lasx_xvhsubw_q_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvhsubw_q_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvhsubw_qu_du(a: v4u64, b: v4u64) -> v4u64 { - __lasx_xvhsubw_qu_du(a, b) +pub fn lasx_xvhsubw_qu_du(a: v4u64, b: v4u64) -> v4u64 { + unsafe { __lasx_xvhsubw_qu_du(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaddwev_q_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64 { - __lasx_xvmaddwev_q_d(a, b, c) +pub fn lasx_xvmaddwev_q_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64 { + unsafe { __lasx_xvmaddwev_q_d(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaddwev_d_w(a: v4i64, b: v8i32, c: v8i32) -> v4i64 { - __lasx_xvmaddwev_d_w(a, b, c) +pub fn lasx_xvmaddwev_d_w(a: v4i64, b: v8i32, c: v8i32) -> v4i64 { + unsafe { __lasx_xvmaddwev_d_w(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaddwev_w_h(a: v8i32, b: v16i16, c: v16i16) -> v8i32 { - __lasx_xvmaddwev_w_h(a, b, c) +pub fn lasx_xvmaddwev_w_h(a: v8i32, b: v16i16, c: v16i16) -> v8i32 { + unsafe { __lasx_xvmaddwev_w_h(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaddwev_h_b(a: v16i16, b: v32i8, c: v32i8) -> v16i16 { - __lasx_xvmaddwev_h_b(a, b, c) +pub fn lasx_xvmaddwev_h_b(a: v16i16, b: v32i8, c: v32i8) -> v16i16 { + unsafe { __lasx_xvmaddwev_h_b(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaddwev_q_du(a: v4u64, b: v4u64, c: v4u64) -> v4u64 { - __lasx_xvmaddwev_q_du(a, b, c) +pub fn lasx_xvmaddwev_q_du(a: v4u64, b: v4u64, c: v4u64) -> v4u64 { + unsafe { __lasx_xvmaddwev_q_du(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaddwev_d_wu(a: v4u64, b: v8u32, c: v8u32) -> v4u64 { - __lasx_xvmaddwev_d_wu(a, b, c) +pub fn lasx_xvmaddwev_d_wu(a: v4u64, b: v8u32, c: 
v8u32) -> v4u64 { + unsafe { __lasx_xvmaddwev_d_wu(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaddwev_w_hu(a: v8u32, b: v16u16, c: v16u16) -> v8u32 { - __lasx_xvmaddwev_w_hu(a, b, c) +pub fn lasx_xvmaddwev_w_hu(a: v8u32, b: v16u16, c: v16u16) -> v8u32 { + unsafe { __lasx_xvmaddwev_w_hu(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaddwev_h_bu(a: v16u16, b: v32u8, c: v32u8) -> v16u16 { - __lasx_xvmaddwev_h_bu(a, b, c) +pub fn lasx_xvmaddwev_h_bu(a: v16u16, b: v32u8, c: v32u8) -> v16u16 { + unsafe { __lasx_xvmaddwev_h_bu(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaddwod_q_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64 { - __lasx_xvmaddwod_q_d(a, b, c) +pub fn lasx_xvmaddwod_q_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64 { + unsafe { __lasx_xvmaddwod_q_d(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaddwod_d_w(a: v4i64, b: v8i32, c: v8i32) -> v4i64 { - __lasx_xvmaddwod_d_w(a, b, c) +pub fn lasx_xvmaddwod_d_w(a: v4i64, b: v8i32, c: v8i32) -> v4i64 { + unsafe { __lasx_xvmaddwod_d_w(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaddwod_w_h(a: v8i32, b: v16i16, c: v16i16) -> v8i32 { - __lasx_xvmaddwod_w_h(a, b, c) +pub fn lasx_xvmaddwod_w_h(a: v8i32, b: v16i16, c: v16i16) -> v8i32 { + unsafe { __lasx_xvmaddwod_w_h(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaddwod_h_b(a: v16i16, b: v32i8, c: v32i8) -> v16i16 { - __lasx_xvmaddwod_h_b(a, b, c) +pub fn lasx_xvmaddwod_h_b(a: v16i16, b: v32i8, c: v32i8) -> v16i16 { + unsafe { __lasx_xvmaddwod_h_b(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaddwod_q_du(a: v4u64, b: v4u64, c: v4u64) -> v4u64 { - __lasx_xvmaddwod_q_du(a, b, c) +pub fn lasx_xvmaddwod_q_du(a: v4u64, b: v4u64, c: v4u64) -> v4u64 { + unsafe { __lasx_xvmaddwod_q_du(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaddwod_d_wu(a: v4u64, b: v8u32, c: v8u32) -> v4u64 { - __lasx_xvmaddwod_d_wu(a, b, c) +pub fn lasx_xvmaddwod_d_wu(a: v4u64, b: v8u32, c: v8u32) -> v4u64 { + unsafe { __lasx_xvmaddwod_d_wu(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaddwod_w_hu(a: v8u32, b: v16u16, c: v16u16) -> v8u32 { - __lasx_xvmaddwod_w_hu(a, b, c) +pub fn lasx_xvmaddwod_w_hu(a: v8u32, b: v16u16, c: v16u16) -> v8u32 { + unsafe { __lasx_xvmaddwod_w_hu(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaddwod_h_bu(a: v16u16, b: v32u8, c: v32u8) -> v16u16 { - __lasx_xvmaddwod_h_bu(a, b, c) +pub fn lasx_xvmaddwod_h_bu(a: v16u16, b: v32u8, c: v32u8) -> v16u16 { + unsafe { __lasx_xvmaddwod_h_bu(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaddwev_q_du_d(a: v4i64, b: v4u64, c: v4i64) -> v4i64 
{ - __lasx_xvmaddwev_q_du_d(a, b, c) +pub fn lasx_xvmaddwev_q_du_d(a: v4i64, b: v4u64, c: v4i64) -> v4i64 { + unsafe { __lasx_xvmaddwev_q_du_d(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaddwev_d_wu_w(a: v4i64, b: v8u32, c: v8i32) -> v4i64 { - __lasx_xvmaddwev_d_wu_w(a, b, c) +pub fn lasx_xvmaddwev_d_wu_w(a: v4i64, b: v8u32, c: v8i32) -> v4i64 { + unsafe { __lasx_xvmaddwev_d_wu_w(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaddwev_w_hu_h(a: v8i32, b: v16u16, c: v16i16) -> v8i32 { - __lasx_xvmaddwev_w_hu_h(a, b, c) +pub fn lasx_xvmaddwev_w_hu_h(a: v8i32, b: v16u16, c: v16i16) -> v8i32 { + unsafe { __lasx_xvmaddwev_w_hu_h(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaddwev_h_bu_b(a: v16i16, b: v32u8, c: v32i8) -> v16i16 { - __lasx_xvmaddwev_h_bu_b(a, b, c) +pub fn lasx_xvmaddwev_h_bu_b(a: v16i16, b: v32u8, c: v32i8) -> v16i16 { + unsafe { __lasx_xvmaddwev_h_bu_b(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaddwod_q_du_d(a: v4i64, b: v4u64, c: v4i64) -> v4i64 { - __lasx_xvmaddwod_q_du_d(a, b, c) +pub fn lasx_xvmaddwod_q_du_d(a: v4i64, b: v4u64, c: v4i64) -> v4i64 { + unsafe { __lasx_xvmaddwod_q_du_d(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaddwod_d_wu_w(a: v4i64, b: v8u32, c: v8i32) -> v4i64 { - __lasx_xvmaddwod_d_wu_w(a, b, c) +pub fn lasx_xvmaddwod_d_wu_w(a: v4i64, b: v8u32, c: v8i32) -> v4i64 { + unsafe { __lasx_xvmaddwod_d_wu_w(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaddwod_w_hu_h(a: v8i32, b: v16u16, c: v16i16) -> v8i32 { - __lasx_xvmaddwod_w_hu_h(a, b, c) +pub fn lasx_xvmaddwod_w_hu_h(a: v8i32, b: v16u16, c: v16i16) -> v8i32 { + unsafe { __lasx_xvmaddwod_w_hu_h(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmaddwod_h_bu_b(a: v16i16, b: v32u8, c: v32i8) -> v16i16 { - __lasx_xvmaddwod_h_bu_b(a, b, c) +pub fn lasx_xvmaddwod_h_bu_b(a: v16i16, b: v32u8, c: v32i8) -> v16i16 { + unsafe { __lasx_xvmaddwod_h_bu_b(a, b, c) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvrotr_b(a: v32i8, b: v32i8) -> v32i8 { - __lasx_xvrotr_b(a, b) +pub fn lasx_xvrotr_b(a: v32i8, b: v32i8) -> v32i8 { + unsafe { __lasx_xvrotr_b(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvrotr_h(a: v16i16, b: v16i16) -> v16i16 { - __lasx_xvrotr_h(a, b) +pub fn lasx_xvrotr_h(a: v16i16, b: v16i16) -> v16i16 { + unsafe { __lasx_xvrotr_h(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvrotr_w(a: v8i32, b: v8i32) -> v8i32 { - __lasx_xvrotr_w(a, b) +pub fn lasx_xvrotr_w(a: v8i32, b: v8i32) -> v8i32 { + unsafe { __lasx_xvrotr_w(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvrotr_d(a: v4i64, b: v4i64) -> v4i64 { - 
__lasx_xvrotr_d(a, b) +pub fn lasx_xvrotr_d(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvrotr_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvadd_q(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvadd_q(a, b) +pub fn lasx_xvadd_q(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvadd_q(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsub_q(a: v4i64, b: v4i64) -> v4i64 { - __lasx_xvsub_q(a, b) +pub fn lasx_xvsub_q(a: v4i64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvsub_q(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvaddwev_q_du_d(a: v4u64, b: v4i64) -> v4i64 { - __lasx_xvaddwev_q_du_d(a, b) +pub fn lasx_xvaddwev_q_du_d(a: v4u64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvaddwev_q_du_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvaddwod_q_du_d(a: v4u64, b: v4i64) -> v4i64 { - __lasx_xvaddwod_q_du_d(a, b) +pub fn lasx_xvaddwod_q_du_d(a: v4u64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvaddwod_q_du_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmulwev_q_du_d(a: v4u64, b: v4i64) -> v4i64 { - __lasx_xvmulwev_q_du_d(a, b) +pub fn lasx_xvmulwev_q_du_d(a: v4u64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvmulwev_q_du_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmulwod_q_du_d(a: v4u64, b: v4i64) -> v4i64 { - __lasx_xvmulwod_q_du_d(a, b) +pub fn lasx_xvmulwod_q_du_d(a: v4u64, b: v4i64) -> v4i64 { + unsafe { __lasx_xvmulwod_q_du_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmskgez_b(a: v32i8) -> v32i8 { - __lasx_xvmskgez_b(a) +pub fn lasx_xvmskgez_b(a: v32i8) -> v32i8 { + unsafe { __lasx_xvmskgez_b(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvmsknz_b(a: v32i8) -> v32i8 { - __lasx_xvmsknz_b(a) +pub fn lasx_xvmsknz_b(a: v32i8) -> v32i8 { + unsafe { __lasx_xvmsknz_b(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvexth_h_b(a: v32i8) -> v16i16 { - __lasx_xvexth_h_b(a) +pub fn lasx_xvexth_h_b(a: v32i8) -> v16i16 { + unsafe { __lasx_xvexth_h_b(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvexth_w_h(a: v16i16) -> v8i32 { - __lasx_xvexth_w_h(a) +pub fn lasx_xvexth_w_h(a: v16i16) -> v8i32 { + unsafe { __lasx_xvexth_w_h(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvexth_d_w(a: v8i32) -> v4i64 { - __lasx_xvexth_d_w(a) +pub fn lasx_xvexth_d_w(a: v8i32) -> v4i64 { + unsafe { __lasx_xvexth_d_w(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvexth_q_d(a: v4i64) -> v4i64 { - __lasx_xvexth_q_d(a) +pub fn lasx_xvexth_q_d(a: v4i64) -> v4i64 { + unsafe { __lasx_xvexth_q_d(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", 
issue = "117427")] -pub unsafe fn lasx_xvexth_hu_bu(a: v32u8) -> v16u16 { - __lasx_xvexth_hu_bu(a) +pub fn lasx_xvexth_hu_bu(a: v32u8) -> v16u16 { + unsafe { __lasx_xvexth_hu_bu(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvexth_wu_hu(a: v16u16) -> v8u32 { - __lasx_xvexth_wu_hu(a) +pub fn lasx_xvexth_wu_hu(a: v16u16) -> v8u32 { + unsafe { __lasx_xvexth_wu_hu(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvexth_du_wu(a: v8u32) -> v4u64 { - __lasx_xvexth_du_wu(a) +pub fn lasx_xvexth_du_wu(a: v8u32) -> v4u64 { + unsafe { __lasx_xvexth_du_wu(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvexth_qu_du(a: v4u64) -> v4u64 { - __lasx_xvexth_qu_du(a) +pub fn lasx_xvexth_qu_du(a: v4u64) -> v4u64 { + unsafe { __lasx_xvexth_qu_du(a) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvrotri_b(a: v32i8) -> v32i8 { +pub fn lasx_xvrotri_b(a: v32i8) -> v32i8 { static_assert_uimm_bits!(IMM3, 3); - __lasx_xvrotri_b(a, IMM3) + unsafe { __lasx_xvrotri_b(a, IMM3) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvrotri_h(a: v16i16) -> v16i16 { +pub fn lasx_xvrotri_h(a: v16i16) -> v16i16 { static_assert_uimm_bits!(IMM4, 4); - __lasx_xvrotri_h(a, IMM4) + unsafe { __lasx_xvrotri_h(a, IMM4) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvrotri_w(a: v8i32) -> v8i32 { +pub fn lasx_xvrotri_w(a: v8i32) -> v8i32 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvrotri_w(a, IMM5) + unsafe { __lasx_xvrotri_w(a, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvrotri_d(a: v4i64) -> v4i64 { +pub fn lasx_xvrotri_d(a: v4i64) -> v4i64 { static_assert_uimm_bits!(IMM6, 6); - __lasx_xvrotri_d(a, IMM6) + unsafe { __lasx_xvrotri_d(a, IMM6) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvextl_q_d(a: v4i64) -> v4i64 { - __lasx_xvextl_q_d(a) +pub fn lasx_xvextl_q_d(a: v4i64) -> v4i64 { + unsafe { __lasx_xvextl_q_d(a) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrlni_b_h(a: v32i8, b: v32i8) -> v32i8 { +pub fn lasx_xvsrlni_b_h(a: v32i8, b: v32i8) -> v32i8 { static_assert_uimm_bits!(IMM4, 4); - __lasx_xvsrlni_b_h(a, b, IMM4) + unsafe { __lasx_xvsrlni_b_h(a, b, IMM4) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrlni_h_w(a: v16i16, b: v16i16) -> v16i16 { +pub fn lasx_xvsrlni_h_w(a: v16i16, b: v16i16) -> v16i16 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvsrlni_h_w(a, b, IMM5) + unsafe { __lasx_xvsrlni_h_w(a, b, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrlni_w_d(a: v8i32, 
b: v8i32) -> v8i32 { +pub fn lasx_xvsrlni_w_d(a: v8i32, b: v8i32) -> v8i32 { static_assert_uimm_bits!(IMM6, 6); - __lasx_xvsrlni_w_d(a, b, IMM6) + unsafe { __lasx_xvsrlni_w_d(a, b, IMM6) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrlni_d_q(a: v4i64, b: v4i64) -> v4i64 { +pub fn lasx_xvsrlni_d_q(a: v4i64, b: v4i64) -> v4i64 { static_assert_uimm_bits!(IMM7, 7); - __lasx_xvsrlni_d_q(a, b, IMM7) + unsafe { __lasx_xvsrlni_d_q(a, b, IMM7) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrlrni_b_h(a: v32i8, b: v32i8) -> v32i8 { +pub fn lasx_xvsrlrni_b_h(a: v32i8, b: v32i8) -> v32i8 { static_assert_uimm_bits!(IMM4, 4); - __lasx_xvsrlrni_b_h(a, b, IMM4) + unsafe { __lasx_xvsrlrni_b_h(a, b, IMM4) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrlrni_h_w(a: v16i16, b: v16i16) -> v16i16 { +pub fn lasx_xvsrlrni_h_w(a: v16i16, b: v16i16) -> v16i16 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvsrlrni_h_w(a, b, IMM5) + unsafe { __lasx_xvsrlrni_h_w(a, b, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrlrni_w_d(a: v8i32, b: v8i32) -> v8i32 { +pub fn lasx_xvsrlrni_w_d(a: v8i32, b: v8i32) -> v8i32 { static_assert_uimm_bits!(IMM6, 6); - __lasx_xvsrlrni_w_d(a, b, IMM6) + unsafe { __lasx_xvsrlrni_w_d(a, b, IMM6) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrlrni_d_q(a: v4i64, b: v4i64) -> v4i64 { +pub fn lasx_xvsrlrni_d_q(a: v4i64, b: v4i64) -> v4i64 { static_assert_uimm_bits!(IMM7, 7); - __lasx_xvsrlrni_d_q(a, b, IMM7) + unsafe { __lasx_xvsrlrni_d_q(a, b, IMM7) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrlni_b_h(a: v32i8, b: v32i8) -> v32i8 { +pub fn lasx_xvssrlni_b_h(a: v32i8, b: v32i8) -> v32i8 { static_assert_uimm_bits!(IMM4, 4); - __lasx_xvssrlni_b_h(a, b, IMM4) + unsafe { __lasx_xvssrlni_b_h(a, b, IMM4) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrlni_h_w(a: v16i16, b: v16i16) -> v16i16 { +pub fn lasx_xvssrlni_h_w(a: v16i16, b: v16i16) -> v16i16 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvssrlni_h_w(a, b, IMM5) + unsafe { __lasx_xvssrlni_h_w(a, b, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrlni_w_d(a: v8i32, b: v8i32) -> v8i32 { +pub fn lasx_xvssrlni_w_d(a: v8i32, b: v8i32) -> v8i32 { static_assert_uimm_bits!(IMM6, 6); - __lasx_xvssrlni_w_d(a, b, IMM6) + unsafe { __lasx_xvssrlni_w_d(a, b, IMM6) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrlni_d_q(a: v4i64, b: v4i64) -> v4i64 { +pub fn lasx_xvssrlni_d_q(a: v4i64, b: v4i64) -> v4i64 { static_assert_uimm_bits!(IMM7, 7); - __lasx_xvssrlni_d_q(a, b, IMM7) + unsafe 
{ __lasx_xvssrlni_d_q(a, b, IMM7) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrlni_bu_h(a: v32u8, b: v32i8) -> v32u8 { +pub fn lasx_xvssrlni_bu_h(a: v32u8, b: v32i8) -> v32u8 { static_assert_uimm_bits!(IMM4, 4); - __lasx_xvssrlni_bu_h(a, b, IMM4) + unsafe { __lasx_xvssrlni_bu_h(a, b, IMM4) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrlni_hu_w(a: v16u16, b: v16i16) -> v16u16 { +pub fn lasx_xvssrlni_hu_w(a: v16u16, b: v16i16) -> v16u16 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvssrlni_hu_w(a, b, IMM5) + unsafe { __lasx_xvssrlni_hu_w(a, b, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrlni_wu_d(a: v8u32, b: v8i32) -> v8u32 { +pub fn lasx_xvssrlni_wu_d(a: v8u32, b: v8i32) -> v8u32 { static_assert_uimm_bits!(IMM6, 6); - __lasx_xvssrlni_wu_d(a, b, IMM6) + unsafe { __lasx_xvssrlni_wu_d(a, b, IMM6) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrlni_du_q(a: v4u64, b: v4i64) -> v4u64 { +pub fn lasx_xvssrlni_du_q(a: v4u64, b: v4i64) -> v4u64 { static_assert_uimm_bits!(IMM7, 7); - __lasx_xvssrlni_du_q(a, b, IMM7) + unsafe { __lasx_xvssrlni_du_q(a, b, IMM7) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrlrni_b_h(a: v32i8, b: v32i8) -> v32i8 { +pub fn lasx_xvssrlrni_b_h(a: v32i8, b: v32i8) -> v32i8 { static_assert_uimm_bits!(IMM4, 4); - __lasx_xvssrlrni_b_h(a, b, IMM4) + unsafe { __lasx_xvssrlrni_b_h(a, b, IMM4) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrlrni_h_w(a: v16i16, b: v16i16) -> v16i16 { +pub fn lasx_xvssrlrni_h_w(a: v16i16, b: v16i16) -> v16i16 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvssrlrni_h_w(a, b, IMM5) + unsafe { __lasx_xvssrlrni_h_w(a, b, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrlrni_w_d(a: v8i32, b: v8i32) -> v8i32 { +pub fn lasx_xvssrlrni_w_d(a: v8i32, b: v8i32) -> v8i32 { static_assert_uimm_bits!(IMM6, 6); - __lasx_xvssrlrni_w_d(a, b, IMM6) + unsafe { __lasx_xvssrlrni_w_d(a, b, IMM6) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrlrni_d_q(a: v4i64, b: v4i64) -> v4i64 { +pub fn lasx_xvssrlrni_d_q(a: v4i64, b: v4i64) -> v4i64 { static_assert_uimm_bits!(IMM7, 7); - __lasx_xvssrlrni_d_q(a, b, IMM7) + unsafe { __lasx_xvssrlrni_d_q(a, b, IMM7) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrlrni_bu_h(a: v32u8, b: v32i8) -> v32u8 { +pub fn lasx_xvssrlrni_bu_h(a: v32u8, b: v32i8) -> v32u8 { static_assert_uimm_bits!(IMM4, 4); - __lasx_xvssrlrni_bu_h(a, b, IMM4) + unsafe { __lasx_xvssrlrni_bu_h(a, b, IMM4) } } #[inline] #[target_feature(enable = "lasx")] 
#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrlrni_hu_w(a: v16u16, b: v16i16) -> v16u16 { +pub fn lasx_xvssrlrni_hu_w(a: v16u16, b: v16i16) -> v16u16 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvssrlrni_hu_w(a, b, IMM5) + unsafe { __lasx_xvssrlrni_hu_w(a, b, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrlrni_wu_d(a: v8u32, b: v8i32) -> v8u32 { +pub fn lasx_xvssrlrni_wu_d(a: v8u32, b: v8i32) -> v8u32 { static_assert_uimm_bits!(IMM6, 6); - __lasx_xvssrlrni_wu_d(a, b, IMM6) + unsafe { __lasx_xvssrlrni_wu_d(a, b, IMM6) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrlrni_du_q(a: v4u64, b: v4i64) -> v4u64 { +pub fn lasx_xvssrlrni_du_q(a: v4u64, b: v4i64) -> v4u64 { static_assert_uimm_bits!(IMM7, 7); - __lasx_xvssrlrni_du_q(a, b, IMM7) + unsafe { __lasx_xvssrlrni_du_q(a, b, IMM7) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrani_b_h(a: v32i8, b: v32i8) -> v32i8 { +pub fn lasx_xvsrani_b_h(a: v32i8, b: v32i8) -> v32i8 { static_assert_uimm_bits!(IMM4, 4); - __lasx_xvsrani_b_h(a, b, IMM4) + unsafe { __lasx_xvsrani_b_h(a, b, IMM4) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrani_h_w(a: v16i16, b: v16i16) -> v16i16 { +pub fn lasx_xvsrani_h_w(a: v16i16, b: v16i16) -> v16i16 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvsrani_h_w(a, b, IMM5) + unsafe { __lasx_xvsrani_h_w(a, b, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrani_w_d(a: v8i32, b: v8i32) -> v8i32 { +pub fn lasx_xvsrani_w_d(a: v8i32, b: v8i32) -> v8i32 { static_assert_uimm_bits!(IMM6, 6); - __lasx_xvsrani_w_d(a, b, IMM6) + unsafe { __lasx_xvsrani_w_d(a, b, IMM6) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrani_d_q(a: v4i64, b: v4i64) -> v4i64 { +pub fn lasx_xvsrani_d_q(a: v4i64, b: v4i64) -> v4i64 { static_assert_uimm_bits!(IMM7, 7); - __lasx_xvsrani_d_q(a, b, IMM7) + unsafe { __lasx_xvsrani_d_q(a, b, IMM7) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrarni_b_h(a: v32i8, b: v32i8) -> v32i8 { +pub fn lasx_xvsrarni_b_h(a: v32i8, b: v32i8) -> v32i8 { static_assert_uimm_bits!(IMM4, 4); - __lasx_xvsrarni_b_h(a, b, IMM4) + unsafe { __lasx_xvsrarni_b_h(a, b, IMM4) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrarni_h_w(a: v16i16, b: v16i16) -> v16i16 { +pub fn lasx_xvsrarni_h_w(a: v16i16, b: v16i16) -> v16i16 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvsrarni_h_w(a, b, IMM5) + unsafe { __lasx_xvsrarni_h_w(a, b, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrarni_w_d(a: 
v8i32, b: v8i32) -> v8i32 { +pub fn lasx_xvsrarni_w_d(a: v8i32, b: v8i32) -> v8i32 { static_assert_uimm_bits!(IMM6, 6); - __lasx_xvsrarni_w_d(a, b, IMM6) + unsafe { __lasx_xvsrarni_w_d(a, b, IMM6) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvsrarni_d_q(a: v4i64, b: v4i64) -> v4i64 { +pub fn lasx_xvsrarni_d_q(a: v4i64, b: v4i64) -> v4i64 { static_assert_uimm_bits!(IMM7, 7); - __lasx_xvsrarni_d_q(a, b, IMM7) + unsafe { __lasx_xvsrarni_d_q(a, b, IMM7) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrani_b_h(a: v32i8, b: v32i8) -> v32i8 { +pub fn lasx_xvssrani_b_h(a: v32i8, b: v32i8) -> v32i8 { static_assert_uimm_bits!(IMM4, 4); - __lasx_xvssrani_b_h(a, b, IMM4) + unsafe { __lasx_xvssrani_b_h(a, b, IMM4) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrani_h_w(a: v16i16, b: v16i16) -> v16i16 { +pub fn lasx_xvssrani_h_w(a: v16i16, b: v16i16) -> v16i16 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvssrani_h_w(a, b, IMM5) + unsafe { __lasx_xvssrani_h_w(a, b, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrani_w_d(a: v8i32, b: v8i32) -> v8i32 { +pub fn lasx_xvssrani_w_d(a: v8i32, b: v8i32) -> v8i32 { static_assert_uimm_bits!(IMM6, 6); - __lasx_xvssrani_w_d(a, b, IMM6) + unsafe { __lasx_xvssrani_w_d(a, b, IMM6) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrani_d_q(a: v4i64, b: v4i64) -> v4i64 { +pub fn lasx_xvssrani_d_q(a: v4i64, b: v4i64) -> v4i64 { static_assert_uimm_bits!(IMM7, 7); - __lasx_xvssrani_d_q(a, b, IMM7) + unsafe { __lasx_xvssrani_d_q(a, b, IMM7) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrani_bu_h(a: v32u8, b: v32i8) -> v32u8 { +pub fn lasx_xvssrani_bu_h(a: v32u8, b: v32i8) -> v32u8 { static_assert_uimm_bits!(IMM4, 4); - __lasx_xvssrani_bu_h(a, b, IMM4) + unsafe { __lasx_xvssrani_bu_h(a, b, IMM4) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrani_hu_w(a: v16u16, b: v16i16) -> v16u16 { +pub fn lasx_xvssrani_hu_w(a: v16u16, b: v16i16) -> v16u16 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvssrani_hu_w(a, b, IMM5) + unsafe { __lasx_xvssrani_hu_w(a, b, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrani_wu_d(a: v8u32, b: v8i32) -> v8u32 { +pub fn lasx_xvssrani_wu_d(a: v8u32, b: v8i32) -> v8u32 { static_assert_uimm_bits!(IMM6, 6); - __lasx_xvssrani_wu_d(a, b, IMM6) + unsafe { __lasx_xvssrani_wu_d(a, b, IMM6) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrani_du_q(a: v4u64, b: v4i64) -> v4u64 { +pub fn lasx_xvssrani_du_q(a: v4u64, b: v4i64) -> v4u64 { static_assert_uimm_bits!(IMM7, 7); - 
__lasx_xvssrani_du_q(a, b, IMM7) + unsafe { __lasx_xvssrani_du_q(a, b, IMM7) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrarni_b_h(a: v32i8, b: v32i8) -> v32i8 { +pub fn lasx_xvssrarni_b_h(a: v32i8, b: v32i8) -> v32i8 { static_assert_uimm_bits!(IMM4, 4); - __lasx_xvssrarni_b_h(a, b, IMM4) + unsafe { __lasx_xvssrarni_b_h(a, b, IMM4) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrarni_h_w(a: v16i16, b: v16i16) -> v16i16 { +pub fn lasx_xvssrarni_h_w(a: v16i16, b: v16i16) -> v16i16 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvssrarni_h_w(a, b, IMM5) + unsafe { __lasx_xvssrarni_h_w(a, b, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrarni_w_d(a: v8i32, b: v8i32) -> v8i32 { +pub fn lasx_xvssrarni_w_d(a: v8i32, b: v8i32) -> v8i32 { static_assert_uimm_bits!(IMM6, 6); - __lasx_xvssrarni_w_d(a, b, IMM6) + unsafe { __lasx_xvssrarni_w_d(a, b, IMM6) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrarni_d_q(a: v4i64, b: v4i64) -> v4i64 { +pub fn lasx_xvssrarni_d_q(a: v4i64, b: v4i64) -> v4i64 { static_assert_uimm_bits!(IMM7, 7); - __lasx_xvssrarni_d_q(a, b, IMM7) + unsafe { __lasx_xvssrarni_d_q(a, b, IMM7) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrarni_bu_h(a: v32u8, b: v32i8) -> v32u8 { +pub fn lasx_xvssrarni_bu_h(a: v32u8, b: v32i8) -> v32u8 { static_assert_uimm_bits!(IMM4, 4); - __lasx_xvssrarni_bu_h(a, b, IMM4) + unsafe { __lasx_xvssrarni_bu_h(a, b, IMM4) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrarni_hu_w(a: v16u16, b: v16i16) -> v16u16 { +pub fn lasx_xvssrarni_hu_w(a: v16u16, b: v16i16) -> v16u16 { static_assert_uimm_bits!(IMM5, 5); - __lasx_xvssrarni_hu_w(a, b, IMM5) + unsafe { __lasx_xvssrarni_hu_w(a, b, IMM5) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrarni_wu_d(a: v8u32, b: v8i32) -> v8u32 { +pub fn lasx_xvssrarni_wu_d(a: v8u32, b: v8i32) -> v8u32 { static_assert_uimm_bits!(IMM6, 6); - __lasx_xvssrarni_wu_d(a, b, IMM6) + unsafe { __lasx_xvssrarni_wu_d(a, b, IMM6) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvssrarni_du_q(a: v4u64, b: v4i64) -> v4u64 { +pub fn lasx_xvssrarni_du_q(a: v4u64, b: v4i64) -> v4u64 { static_assert_uimm_bits!(IMM7, 7); - __lasx_xvssrarni_du_q(a, b, IMM7) + unsafe { __lasx_xvssrarni_du_q(a, b, IMM7) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xbnz_b(a: v32u8) -> i32 { - __lasx_xbnz_b(a) +pub fn lasx_xbnz_b(a: v32u8) -> i32 { + unsafe { __lasx_xbnz_b(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xbnz_d(a: v4u64) -> i32 { - 
__lasx_xbnz_d(a) +pub fn lasx_xbnz_d(a: v4u64) -> i32 { + unsafe { __lasx_xbnz_d(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xbnz_h(a: v16u16) -> i32 { - __lasx_xbnz_h(a) +pub fn lasx_xbnz_h(a: v16u16) -> i32 { + unsafe { __lasx_xbnz_h(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xbnz_v(a: v32u8) -> i32 { - __lasx_xbnz_v(a) +pub fn lasx_xbnz_v(a: v32u8) -> i32 { + unsafe { __lasx_xbnz_v(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xbnz_w(a: v8u32) -> i32 { - __lasx_xbnz_w(a) +pub fn lasx_xbnz_w(a: v8u32) -> i32 { + unsafe { __lasx_xbnz_w(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xbz_b(a: v32u8) -> i32 { - __lasx_xbz_b(a) +pub fn lasx_xbz_b(a: v32u8) -> i32 { + unsafe { __lasx_xbz_b(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xbz_d(a: v4u64) -> i32 { - __lasx_xbz_d(a) +pub fn lasx_xbz_d(a: v4u64) -> i32 { + unsafe { __lasx_xbz_d(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xbz_h(a: v16u16) -> i32 { - __lasx_xbz_h(a) +pub fn lasx_xbz_h(a: v16u16) -> i32 { + unsafe { __lasx_xbz_h(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xbz_v(a: v32u8) -> i32 { - __lasx_xbz_v(a) +pub fn lasx_xbz_v(a: v32u8) -> i32 { + unsafe { __lasx_xbz_v(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xbz_w(a: v8u32) -> i32 { - __lasx_xbz_w(a) +pub fn lasx_xbz_w(a: v8u32) -> i32 { + unsafe { __lasx_xbz_w(a) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_caf_d(a: v4f64, b: v4f64) -> v4i64 { - __lasx_xvfcmp_caf_d(a, b) +pub fn lasx_xvfcmp_caf_d(a: v4f64, b: v4f64) -> v4i64 { + unsafe { __lasx_xvfcmp_caf_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_caf_s(a: v8f32, b: v8f32) -> v8i32 { - __lasx_xvfcmp_caf_s(a, b) +pub fn lasx_xvfcmp_caf_s(a: v8f32, b: v8f32) -> v8i32 { + unsafe { __lasx_xvfcmp_caf_s(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_ceq_d(a: v4f64, b: v4f64) -> v4i64 { - __lasx_xvfcmp_ceq_d(a, b) +pub fn lasx_xvfcmp_ceq_d(a: v4f64, b: v4f64) -> v4i64 { + unsafe { __lasx_xvfcmp_ceq_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_ceq_s(a: v8f32, b: v8f32) -> v8i32 { - __lasx_xvfcmp_ceq_s(a, b) +pub fn lasx_xvfcmp_ceq_s(a: v8f32, b: v8f32) -> v8i32 { + unsafe { __lasx_xvfcmp_ceq_s(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_cle_d(a: v4f64, b: v4f64) -> v4i64 { - __lasx_xvfcmp_cle_d(a, b) +pub fn lasx_xvfcmp_cle_d(a: v4f64, b: v4f64) -> v4i64 { + unsafe { __lasx_xvfcmp_cle_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_cle_s(a: v8f32, b: v8f32) -> v8i32 { - __lasx_xvfcmp_cle_s(a, b) +pub fn lasx_xvfcmp_cle_s(a: v8f32, b: v8f32) -> v8i32 { + unsafe { __lasx_xvfcmp_cle_s(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_clt_d(a: v4f64, b: v4f64) -> v4i64 { - __lasx_xvfcmp_clt_d(a, b) +pub fn lasx_xvfcmp_clt_d(a: v4f64, b: v4f64) -> v4i64 { + unsafe { __lasx_xvfcmp_clt_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_clt_s(a: v8f32, b: v8f32) -> v8i32 { - __lasx_xvfcmp_clt_s(a, b) +pub fn lasx_xvfcmp_clt_s(a: v8f32, b: v8f32) -> v8i32 { + unsafe { __lasx_xvfcmp_clt_s(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_cne_d(a: v4f64, b: v4f64) -> v4i64 { - __lasx_xvfcmp_cne_d(a, b) +pub fn lasx_xvfcmp_cne_d(a: v4f64, b: v4f64) -> v4i64 { + unsafe { __lasx_xvfcmp_cne_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_cne_s(a: v8f32, b: v8f32) -> v8i32 { - __lasx_xvfcmp_cne_s(a, b) +pub fn lasx_xvfcmp_cne_s(a: v8f32, b: v8f32) -> v8i32 { + unsafe { __lasx_xvfcmp_cne_s(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_cor_d(a: v4f64, b: v4f64) -> v4i64 { - __lasx_xvfcmp_cor_d(a, b) +pub fn lasx_xvfcmp_cor_d(a: v4f64, b: v4f64) -> v4i64 { + unsafe { __lasx_xvfcmp_cor_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_cor_s(a: v8f32, b: v8f32) -> v8i32 { - __lasx_xvfcmp_cor_s(a, b) +pub fn lasx_xvfcmp_cor_s(a: v8f32, b: v8f32) -> v8i32 { + unsafe { __lasx_xvfcmp_cor_s(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_cueq_d(a: v4f64, b: v4f64) -> v4i64 { - __lasx_xvfcmp_cueq_d(a, b) +pub fn lasx_xvfcmp_cueq_d(a: v4f64, b: v4f64) -> v4i64 { + unsafe { __lasx_xvfcmp_cueq_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_cueq_s(a: v8f32, b: v8f32) -> v8i32 { - __lasx_xvfcmp_cueq_s(a, b) +pub fn lasx_xvfcmp_cueq_s(a: v8f32, b: v8f32) -> v8i32 { + unsafe { __lasx_xvfcmp_cueq_s(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_cule_d(a: v4f64, b: v4f64) -> v4i64 { - __lasx_xvfcmp_cule_d(a, b) +pub fn lasx_xvfcmp_cule_d(a: v4f64, b: v4f64) -> v4i64 { + unsafe { __lasx_xvfcmp_cule_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_cule_s(a: v8f32, b: v8f32) -> v8i32 { - __lasx_xvfcmp_cule_s(a, b) +pub fn lasx_xvfcmp_cule_s(a: v8f32, b: v8f32) -> v8i32 { + unsafe { __lasx_xvfcmp_cule_s(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_cult_d(a: v4f64, b: v4f64) -> v4i64 { - __lasx_xvfcmp_cult_d(a, b) +pub fn lasx_xvfcmp_cult_d(a: v4f64, b: v4f64) -> v4i64 { + unsafe { __lasx_xvfcmp_cult_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_cult_s(a: v8f32, b: v8f32) -> v8i32 { - __lasx_xvfcmp_cult_s(a, b) +pub fn lasx_xvfcmp_cult_s(a: v8f32, b: v8f32) -> v8i32 { + unsafe { __lasx_xvfcmp_cult_s(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_cun_d(a: v4f64, b: v4f64) -> v4i64 { - __lasx_xvfcmp_cun_d(a, b) +pub fn lasx_xvfcmp_cun_d(a: v4f64, b: v4f64) -> v4i64 { + unsafe { __lasx_xvfcmp_cun_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_cune_d(a: v4f64, b: v4f64) -> v4i64 { - __lasx_xvfcmp_cune_d(a, b) +pub fn lasx_xvfcmp_cune_d(a: v4f64, b: v4f64) -> v4i64 { + unsafe { __lasx_xvfcmp_cune_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_cune_s(a: v8f32, b: v8f32) -> v8i32 { - __lasx_xvfcmp_cune_s(a, b) +pub fn lasx_xvfcmp_cune_s(a: v8f32, b: v8f32) -> v8i32 { + unsafe { __lasx_xvfcmp_cune_s(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_cun_s(a: v8f32, b: v8f32) -> v8i32 { - __lasx_xvfcmp_cun_s(a, b) +pub fn lasx_xvfcmp_cun_s(a: v8f32, b: v8f32) -> v8i32 { + unsafe { __lasx_xvfcmp_cun_s(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_saf_d(a: v4f64, b: v4f64) -> v4i64 { - __lasx_xvfcmp_saf_d(a, b) +pub fn lasx_xvfcmp_saf_d(a: v4f64, b: v4f64) -> v4i64 { + unsafe { __lasx_xvfcmp_saf_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_saf_s(a: v8f32, b: v8f32) -> v8i32 { - __lasx_xvfcmp_saf_s(a, b) +pub fn lasx_xvfcmp_saf_s(a: v8f32, b: v8f32) -> v8i32 { + unsafe { __lasx_xvfcmp_saf_s(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_seq_d(a: v4f64, b: v4f64) -> v4i64 { - __lasx_xvfcmp_seq_d(a, b) +pub fn lasx_xvfcmp_seq_d(a: v4f64, b: v4f64) -> v4i64 { + unsafe { __lasx_xvfcmp_seq_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_seq_s(a: v8f32, b: v8f32) -> v8i32 { - __lasx_xvfcmp_seq_s(a, b) +pub fn lasx_xvfcmp_seq_s(a: v8f32, b: v8f32) -> v8i32 { + unsafe { __lasx_xvfcmp_seq_s(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_sle_d(a: v4f64, b: v4f64) -> v4i64 { - __lasx_xvfcmp_sle_d(a, b) +pub fn lasx_xvfcmp_sle_d(a: v4f64, b: v4f64) -> v4i64 { + unsafe { __lasx_xvfcmp_sle_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_sle_s(a: v8f32, b: v8f32) -> v8i32 { - __lasx_xvfcmp_sle_s(a, b) +pub fn lasx_xvfcmp_sle_s(a: v8f32, b: v8f32) -> v8i32 { + unsafe { __lasx_xvfcmp_sle_s(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_slt_d(a: v4f64, b: v4f64) -> v4i64 { - __lasx_xvfcmp_slt_d(a, b) +pub fn lasx_xvfcmp_slt_d(a: v4f64, b: v4f64) -> v4i64 { + unsafe { __lasx_xvfcmp_slt_d(a, b) } } #[inline] #[target_feature(enable = 
"lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_slt_s(a: v8f32, b: v8f32) -> v8i32 { - __lasx_xvfcmp_slt_s(a, b) +pub fn lasx_xvfcmp_slt_s(a: v8f32, b: v8f32) -> v8i32 { + unsafe { __lasx_xvfcmp_slt_s(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_sne_d(a: v4f64, b: v4f64) -> v4i64 { - __lasx_xvfcmp_sne_d(a, b) +pub fn lasx_xvfcmp_sne_d(a: v4f64, b: v4f64) -> v4i64 { + unsafe { __lasx_xvfcmp_sne_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_sne_s(a: v8f32, b: v8f32) -> v8i32 { - __lasx_xvfcmp_sne_s(a, b) +pub fn lasx_xvfcmp_sne_s(a: v8f32, b: v8f32) -> v8i32 { + unsafe { __lasx_xvfcmp_sne_s(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_sor_d(a: v4f64, b: v4f64) -> v4i64 { - __lasx_xvfcmp_sor_d(a, b) +pub fn lasx_xvfcmp_sor_d(a: v4f64, b: v4f64) -> v4i64 { + unsafe { __lasx_xvfcmp_sor_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_sor_s(a: v8f32, b: v8f32) -> v8i32 { - __lasx_xvfcmp_sor_s(a, b) +pub fn lasx_xvfcmp_sor_s(a: v8f32, b: v8f32) -> v8i32 { + unsafe { __lasx_xvfcmp_sor_s(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_sueq_d(a: v4f64, b: v4f64) -> v4i64 { - __lasx_xvfcmp_sueq_d(a, b) +pub fn lasx_xvfcmp_sueq_d(a: v4f64, b: v4f64) -> v4i64 { + unsafe { __lasx_xvfcmp_sueq_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_sueq_s(a: v8f32, b: v8f32) -> v8i32 { - __lasx_xvfcmp_sueq_s(a, b) +pub fn lasx_xvfcmp_sueq_s(a: v8f32, b: v8f32) -> v8i32 { + unsafe { __lasx_xvfcmp_sueq_s(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_sule_d(a: v4f64, b: v4f64) -> v4i64 { - __lasx_xvfcmp_sule_d(a, b) +pub fn lasx_xvfcmp_sule_d(a: v4f64, b: v4f64) -> v4i64 { + unsafe { __lasx_xvfcmp_sule_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_sule_s(a: v8f32, b: v8f32) -> v8i32 { - __lasx_xvfcmp_sule_s(a, b) +pub fn lasx_xvfcmp_sule_s(a: v8f32, b: v8f32) -> v8i32 { + unsafe { __lasx_xvfcmp_sule_s(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_sult_d(a: v4f64, b: v4f64) -> v4i64 { - __lasx_xvfcmp_sult_d(a, b) +pub fn lasx_xvfcmp_sult_d(a: v4f64, b: v4f64) -> v4i64 { + unsafe { __lasx_xvfcmp_sult_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_sult_s(a: v8f32, b: v8f32) -> v8i32 { - __lasx_xvfcmp_sult_s(a, b) +pub fn lasx_xvfcmp_sult_s(a: v8f32, b: v8f32) -> v8i32 { + unsafe { __lasx_xvfcmp_sult_s(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_sun_d(a: v4f64, b: v4f64) -> v4i64 { - __lasx_xvfcmp_sun_d(a, b) +pub fn lasx_xvfcmp_sun_d(a: v4f64, b: v4f64) -> v4i64 { + unsafe { __lasx_xvfcmp_sun_d(a, b) } } #[inline] 
#[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_sune_d(a: v4f64, b: v4f64) -> v4i64 { - __lasx_xvfcmp_sune_d(a, b) +pub fn lasx_xvfcmp_sune_d(a: v4f64, b: v4f64) -> v4i64 { + unsafe { __lasx_xvfcmp_sune_d(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_sune_s(a: v8f32, b: v8f32) -> v8i32 { - __lasx_xvfcmp_sune_s(a, b) +pub fn lasx_xvfcmp_sune_s(a: v8f32, b: v8f32) -> v8i32 { + unsafe { __lasx_xvfcmp_sune_s(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvfcmp_sun_s(a: v8f32, b: v8f32) -> v8i32 { - __lasx_xvfcmp_sun_s(a, b) +pub fn lasx_xvfcmp_sun_s(a: v8f32, b: v8f32) -> v8i32 { + unsafe { __lasx_xvfcmp_sun_s(a, b) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvpickve_d_f(a: v4f64) -> v4f64 { +pub fn lasx_xvpickve_d_f(a: v4f64) -> v4f64 { static_assert_uimm_bits!(IMM2, 2); - __lasx_xvpickve_d_f(a, IMM2) + unsafe { __lasx_xvpickve_d_f(a, IMM2) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvpickve_w_f(a: v8f32) -> v8f32 { +pub fn lasx_xvpickve_w_f(a: v8f32) -> v8f32 { static_assert_uimm_bits!(IMM3, 3); - __lasx_xvpickve_w_f(a, IMM3) + unsafe { __lasx_xvpickve_w_f(a, IMM3) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(0)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvrepli_b() -> v32i8 { +pub fn lasx_xvrepli_b() -> v32i8 { static_assert_simm_bits!(IMM_S10, 10); - __lasx_xvrepli_b(IMM_S10) + unsafe { __lasx_xvrepli_b(IMM_S10) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(0)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvrepli_d() -> v4i64 { +pub fn lasx_xvrepli_d() -> v4i64 { static_assert_simm_bits!(IMM_S10, 10); - __lasx_xvrepli_d(IMM_S10) + unsafe { __lasx_xvrepli_d(IMM_S10) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(0)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvrepli_h() -> v16i16 { +pub fn lasx_xvrepli_h() -> v16i16 { static_assert_simm_bits!(IMM_S10, 10); - __lasx_xvrepli_h(IMM_S10) + unsafe { __lasx_xvrepli_h(IMM_S10) } } #[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(0)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lasx_xvrepli_w() -> v8i32 { +pub fn lasx_xvrepli_w() -> v8i32 { static_assert_simm_bits!(IMM_S10, 10); - __lasx_xvrepli_w(IMM_S10) + unsafe { __lasx_xvrepli_w(IMM_S10) } } diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs b/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs index 2bc364f3e069e..ba821a3e3dc6d 100644 --- a/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs +++ b/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs @@ -1455,3593 +1455,3593 @@ unsafe extern "unadjusted" { #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsll_b(a: v16i8, b: v16i8) -> v16i8 { - __lsx_vsll_b(a, b) +pub fn lsx_vsll_b(a: v16i8, b: v16i8) -> v16i8 { + unsafe { __lsx_vsll_b(a, b) } } 
#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsll_h(a: v8i16, b: v8i16) -> v8i16 { - __lsx_vsll_h(a, b) +pub fn lsx_vsll_h(a: v8i16, b: v8i16) -> v8i16 { + unsafe { __lsx_vsll_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsll_w(a: v4i32, b: v4i32) -> v4i32 { - __lsx_vsll_w(a, b) +pub fn lsx_vsll_w(a: v4i32, b: v4i32) -> v4i32 { + unsafe { __lsx_vsll_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsll_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vsll_d(a, b) +pub fn lsx_vsll_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vsll_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vslli_b(a: v16i8) -> v16i8 { +pub fn lsx_vslli_b(a: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM3, 3); - __lsx_vslli_b(a, IMM3) + unsafe { __lsx_vslli_b(a, IMM3) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vslli_h(a: v8i16) -> v8i16 { +pub fn lsx_vslli_h(a: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM4, 4); - __lsx_vslli_h(a, IMM4) + unsafe { __lsx_vslli_h(a, IMM4) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vslli_w(a: v4i32) -> v4i32 { +pub fn lsx_vslli_w(a: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vslli_w(a, IMM5) + unsafe { __lsx_vslli_w(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vslli_d(a: v2i64) -> v2i64 { +pub fn lsx_vslli_d(a: v2i64) -> v2i64 { static_assert_uimm_bits!(IMM6, 6); - __lsx_vslli_d(a, IMM6) + unsafe { __lsx_vslli_d(a, IMM6) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsra_b(a: v16i8, b: v16i8) -> v16i8 { - __lsx_vsra_b(a, b) +pub fn lsx_vsra_b(a: v16i8, b: v16i8) -> v16i8 { + unsafe { __lsx_vsra_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsra_h(a: v8i16, b: v8i16) -> v8i16 { - __lsx_vsra_h(a, b) +pub fn lsx_vsra_h(a: v8i16, b: v8i16) -> v8i16 { + unsafe { __lsx_vsra_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsra_w(a: v4i32, b: v4i32) -> v4i32 { - __lsx_vsra_w(a, b) +pub fn lsx_vsra_w(a: v4i32, b: v4i32) -> v4i32 { + unsafe { __lsx_vsra_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsra_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vsra_d(a, b) +pub fn lsx_vsra_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vsra_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrai_b(a: v16i8) -> v16i8 { +pub fn lsx_vsrai_b(a: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM3, 3); - __lsx_vsrai_b(a, IMM3) + unsafe { __lsx_vsrai_b(a, IMM3) } } #[inline] #[target_feature(enable = "lsx")] 
#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrai_h(a: v8i16) -> v8i16 { +pub fn lsx_vsrai_h(a: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM4, 4); - __lsx_vsrai_h(a, IMM4) + unsafe { __lsx_vsrai_h(a, IMM4) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrai_w(a: v4i32) -> v4i32 { +pub fn lsx_vsrai_w(a: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vsrai_w(a, IMM5) + unsafe { __lsx_vsrai_w(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrai_d(a: v2i64) -> v2i64 { +pub fn lsx_vsrai_d(a: v2i64) -> v2i64 { static_assert_uimm_bits!(IMM6, 6); - __lsx_vsrai_d(a, IMM6) + unsafe { __lsx_vsrai_d(a, IMM6) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrar_b(a: v16i8, b: v16i8) -> v16i8 { - __lsx_vsrar_b(a, b) +pub fn lsx_vsrar_b(a: v16i8, b: v16i8) -> v16i8 { + unsafe { __lsx_vsrar_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrar_h(a: v8i16, b: v8i16) -> v8i16 { - __lsx_vsrar_h(a, b) +pub fn lsx_vsrar_h(a: v8i16, b: v8i16) -> v8i16 { + unsafe { __lsx_vsrar_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrar_w(a: v4i32, b: v4i32) -> v4i32 { - __lsx_vsrar_w(a, b) +pub fn lsx_vsrar_w(a: v4i32, b: v4i32) -> v4i32 { + unsafe { __lsx_vsrar_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrar_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vsrar_d(a, b) +pub fn lsx_vsrar_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vsrar_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrari_b(a: v16i8) -> v16i8 { +pub fn lsx_vsrari_b(a: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM3, 3); - __lsx_vsrari_b(a, IMM3) + unsafe { __lsx_vsrari_b(a, IMM3) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrari_h(a: v8i16) -> v8i16 { +pub fn lsx_vsrari_h(a: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM4, 4); - __lsx_vsrari_h(a, IMM4) + unsafe { __lsx_vsrari_h(a, IMM4) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrari_w(a: v4i32) -> v4i32 { +pub fn lsx_vsrari_w(a: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vsrari_w(a, IMM5) + unsafe { __lsx_vsrari_w(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrari_d(a: v2i64) -> v2i64 { +pub fn lsx_vsrari_d(a: v2i64) -> v2i64 { static_assert_uimm_bits!(IMM6, 6); - __lsx_vsrari_d(a, IMM6) + unsafe { __lsx_vsrari_d(a, IMM6) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrl_b(a: v16i8, b: v16i8) -> v16i8 { - __lsx_vsrl_b(a, b) +pub fn lsx_vsrl_b(a: v16i8, b: 
v16i8) -> v16i8 { + unsafe { __lsx_vsrl_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrl_h(a: v8i16, b: v8i16) -> v8i16 { - __lsx_vsrl_h(a, b) +pub fn lsx_vsrl_h(a: v8i16, b: v8i16) -> v8i16 { + unsafe { __lsx_vsrl_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrl_w(a: v4i32, b: v4i32) -> v4i32 { - __lsx_vsrl_w(a, b) +pub fn lsx_vsrl_w(a: v4i32, b: v4i32) -> v4i32 { + unsafe { __lsx_vsrl_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrl_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vsrl_d(a, b) +pub fn lsx_vsrl_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vsrl_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrli_b(a: v16i8) -> v16i8 { +pub fn lsx_vsrli_b(a: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM3, 3); - __lsx_vsrli_b(a, IMM3) + unsafe { __lsx_vsrli_b(a, IMM3) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrli_h(a: v8i16) -> v8i16 { +pub fn lsx_vsrli_h(a: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM4, 4); - __lsx_vsrli_h(a, IMM4) + unsafe { __lsx_vsrli_h(a, IMM4) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrli_w(a: v4i32) -> v4i32 { +pub fn lsx_vsrli_w(a: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vsrli_w(a, IMM5) + unsafe { __lsx_vsrli_w(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrli_d(a: v2i64) -> v2i64 { +pub fn lsx_vsrli_d(a: v2i64) -> v2i64 { static_assert_uimm_bits!(IMM6, 6); - __lsx_vsrli_d(a, IMM6) + unsafe { __lsx_vsrli_d(a, IMM6) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrlr_b(a: v16i8, b: v16i8) -> v16i8 { - __lsx_vsrlr_b(a, b) +pub fn lsx_vsrlr_b(a: v16i8, b: v16i8) -> v16i8 { + unsafe { __lsx_vsrlr_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrlr_h(a: v8i16, b: v8i16) -> v8i16 { - __lsx_vsrlr_h(a, b) +pub fn lsx_vsrlr_h(a: v8i16, b: v8i16) -> v8i16 { + unsafe { __lsx_vsrlr_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrlr_w(a: v4i32, b: v4i32) -> v4i32 { - __lsx_vsrlr_w(a, b) +pub fn lsx_vsrlr_w(a: v4i32, b: v4i32) -> v4i32 { + unsafe { __lsx_vsrlr_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrlr_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vsrlr_d(a, b) +pub fn lsx_vsrlr_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vsrlr_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrlri_b(a: v16i8) -> v16i8 { +pub fn lsx_vsrlri_b(a: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM3, 3); - __lsx_vsrlri_b(a, IMM3) + unsafe { 
__lsx_vsrlri_b(a, IMM3) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrlri_h(a: v8i16) -> v8i16 { +pub fn lsx_vsrlri_h(a: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM4, 4); - __lsx_vsrlri_h(a, IMM4) + unsafe { __lsx_vsrlri_h(a, IMM4) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrlri_w(a: v4i32) -> v4i32 { +pub fn lsx_vsrlri_w(a: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vsrlri_w(a, IMM5) + unsafe { __lsx_vsrlri_w(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrlri_d(a: v2i64) -> v2i64 { +pub fn lsx_vsrlri_d(a: v2i64) -> v2i64 { static_assert_uimm_bits!(IMM6, 6); - __lsx_vsrlri_d(a, IMM6) + unsafe { __lsx_vsrlri_d(a, IMM6) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vbitclr_b(a: v16u8, b: v16u8) -> v16u8 { - __lsx_vbitclr_b(a, b) +pub fn lsx_vbitclr_b(a: v16u8, b: v16u8) -> v16u8 { + unsafe { __lsx_vbitclr_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vbitclr_h(a: v8u16, b: v8u16) -> v8u16 { - __lsx_vbitclr_h(a, b) +pub fn lsx_vbitclr_h(a: v8u16, b: v8u16) -> v8u16 { + unsafe { __lsx_vbitclr_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vbitclr_w(a: v4u32, b: v4u32) -> v4u32 { - __lsx_vbitclr_w(a, b) +pub fn lsx_vbitclr_w(a: v4u32, b: v4u32) -> v4u32 { + unsafe { __lsx_vbitclr_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vbitclr_d(a: v2u64, b: v2u64) -> v2u64 { - __lsx_vbitclr_d(a, b) +pub fn lsx_vbitclr_d(a: v2u64, b: v2u64) -> v2u64 { + unsafe { __lsx_vbitclr_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vbitclri_b(a: v16u8) -> v16u8 { +pub fn lsx_vbitclri_b(a: v16u8) -> v16u8 { static_assert_uimm_bits!(IMM3, 3); - __lsx_vbitclri_b(a, IMM3) + unsafe { __lsx_vbitclri_b(a, IMM3) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vbitclri_h(a: v8u16) -> v8u16 { +pub fn lsx_vbitclri_h(a: v8u16) -> v8u16 { static_assert_uimm_bits!(IMM4, 4); - __lsx_vbitclri_h(a, IMM4) + unsafe { __lsx_vbitclri_h(a, IMM4) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vbitclri_w(a: v4u32) -> v4u32 { +pub fn lsx_vbitclri_w(a: v4u32) -> v4u32 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vbitclri_w(a, IMM5) + unsafe { __lsx_vbitclri_w(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vbitclri_d(a: v2u64) -> v2u64 { +pub fn lsx_vbitclri_d(a: v2u64) -> v2u64 { static_assert_uimm_bits!(IMM6, 6); - __lsx_vbitclri_d(a, IMM6) + unsafe { __lsx_vbitclri_d(a, IMM6) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vbitset_b(a: v16u8, b: v16u8) -> v16u8 { - __lsx_vbitset_b(a, b) +pub fn lsx_vbitset_b(a: v16u8, b: v16u8) -> v16u8 { + unsafe { __lsx_vbitset_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vbitset_h(a: v8u16, b: v8u16) -> v8u16 { - __lsx_vbitset_h(a, b) +pub fn lsx_vbitset_h(a: v8u16, b: v8u16) -> v8u16 { + unsafe { __lsx_vbitset_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vbitset_w(a: v4u32, b: v4u32) -> v4u32 { - __lsx_vbitset_w(a, b) +pub fn lsx_vbitset_w(a: v4u32, b: v4u32) -> v4u32 { + unsafe { __lsx_vbitset_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vbitset_d(a: v2u64, b: v2u64) -> v2u64 { - __lsx_vbitset_d(a, b) +pub fn lsx_vbitset_d(a: v2u64, b: v2u64) -> v2u64 { + unsafe { __lsx_vbitset_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vbitseti_b(a: v16u8) -> v16u8 { +pub fn lsx_vbitseti_b(a: v16u8) -> v16u8 { static_assert_uimm_bits!(IMM3, 3); - __lsx_vbitseti_b(a, IMM3) + unsafe { __lsx_vbitseti_b(a, IMM3) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vbitseti_h(a: v8u16) -> v8u16 { +pub fn lsx_vbitseti_h(a: v8u16) -> v8u16 { static_assert_uimm_bits!(IMM4, 4); - __lsx_vbitseti_h(a, IMM4) + unsafe { __lsx_vbitseti_h(a, IMM4) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vbitseti_w(a: v4u32) -> v4u32 { +pub fn lsx_vbitseti_w(a: v4u32) -> v4u32 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vbitseti_w(a, IMM5) + unsafe { __lsx_vbitseti_w(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vbitseti_d(a: v2u64) -> v2u64 { +pub fn lsx_vbitseti_d(a: v2u64) -> v2u64 { static_assert_uimm_bits!(IMM6, 6); - __lsx_vbitseti_d(a, IMM6) + unsafe { __lsx_vbitseti_d(a, IMM6) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vbitrev_b(a: v16u8, b: v16u8) -> v16u8 { - __lsx_vbitrev_b(a, b) +pub fn lsx_vbitrev_b(a: v16u8, b: v16u8) -> v16u8 { + unsafe { __lsx_vbitrev_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vbitrev_h(a: v8u16, b: v8u16) -> v8u16 { - __lsx_vbitrev_h(a, b) +pub fn lsx_vbitrev_h(a: v8u16, b: v8u16) -> v8u16 { + unsafe { __lsx_vbitrev_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vbitrev_w(a: v4u32, b: v4u32) -> v4u32 { - __lsx_vbitrev_w(a, b) +pub fn lsx_vbitrev_w(a: v4u32, b: v4u32) -> v4u32 { + unsafe { __lsx_vbitrev_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vbitrev_d(a: v2u64, b: v2u64) -> v2u64 { - __lsx_vbitrev_d(a, b) +pub fn lsx_vbitrev_d(a: v2u64, b: v2u64) -> v2u64 { + unsafe { __lsx_vbitrev_d(a, b) } } #[inline] #[target_feature(enable = 
"lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vbitrevi_b(a: v16u8) -> v16u8 { +pub fn lsx_vbitrevi_b(a: v16u8) -> v16u8 { static_assert_uimm_bits!(IMM3, 3); - __lsx_vbitrevi_b(a, IMM3) + unsafe { __lsx_vbitrevi_b(a, IMM3) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vbitrevi_h(a: v8u16) -> v8u16 { +pub fn lsx_vbitrevi_h(a: v8u16) -> v8u16 { static_assert_uimm_bits!(IMM4, 4); - __lsx_vbitrevi_h(a, IMM4) + unsafe { __lsx_vbitrevi_h(a, IMM4) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vbitrevi_w(a: v4u32) -> v4u32 { +pub fn lsx_vbitrevi_w(a: v4u32) -> v4u32 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vbitrevi_w(a, IMM5) + unsafe { __lsx_vbitrevi_w(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vbitrevi_d(a: v2u64) -> v2u64 { +pub fn lsx_vbitrevi_d(a: v2u64) -> v2u64 { static_assert_uimm_bits!(IMM6, 6); - __lsx_vbitrevi_d(a, IMM6) + unsafe { __lsx_vbitrevi_d(a, IMM6) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vadd_b(a: v16i8, b: v16i8) -> v16i8 { - __lsx_vadd_b(a, b) +pub fn lsx_vadd_b(a: v16i8, b: v16i8) -> v16i8 { + unsafe { __lsx_vadd_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vadd_h(a: v8i16, b: v8i16) -> v8i16 { - __lsx_vadd_h(a, b) +pub fn lsx_vadd_h(a: v8i16, b: v8i16) -> v8i16 { + unsafe { __lsx_vadd_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vadd_w(a: v4i32, b: v4i32) -> v4i32 { - __lsx_vadd_w(a, b) +pub fn lsx_vadd_w(a: v4i32, b: v4i32) -> v4i32 { + unsafe { __lsx_vadd_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vadd_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vadd_d(a, b) +pub fn lsx_vadd_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vadd_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vaddi_bu(a: v16i8) -> v16i8 { +pub fn lsx_vaddi_bu(a: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vaddi_bu(a, IMM5) + unsafe { __lsx_vaddi_bu(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vaddi_hu(a: v8i16) -> v8i16 { +pub fn lsx_vaddi_hu(a: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vaddi_hu(a, IMM5) + unsafe { __lsx_vaddi_hu(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vaddi_wu(a: v4i32) -> v4i32 { +pub fn lsx_vaddi_wu(a: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vaddi_wu(a, IMM5) + unsafe { __lsx_vaddi_wu(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vaddi_du(a: v2i64) -> 
v2i64 { +pub fn lsx_vaddi_du(a: v2i64) -> v2i64 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vaddi_du(a, IMM5) + unsafe { __lsx_vaddi_du(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsub_b(a: v16i8, b: v16i8) -> v16i8 { - __lsx_vsub_b(a, b) +pub fn lsx_vsub_b(a: v16i8, b: v16i8) -> v16i8 { + unsafe { __lsx_vsub_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsub_h(a: v8i16, b: v8i16) -> v8i16 { - __lsx_vsub_h(a, b) +pub fn lsx_vsub_h(a: v8i16, b: v8i16) -> v8i16 { + unsafe { __lsx_vsub_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsub_w(a: v4i32, b: v4i32) -> v4i32 { - __lsx_vsub_w(a, b) +pub fn lsx_vsub_w(a: v4i32, b: v4i32) -> v4i32 { + unsafe { __lsx_vsub_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsub_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vsub_d(a, b) +pub fn lsx_vsub_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vsub_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsubi_bu(a: v16i8) -> v16i8 { +pub fn lsx_vsubi_bu(a: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vsubi_bu(a, IMM5) + unsafe { __lsx_vsubi_bu(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsubi_hu(a: v8i16) -> v8i16 { +pub fn lsx_vsubi_hu(a: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vsubi_hu(a, IMM5) + unsafe { __lsx_vsubi_hu(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsubi_wu(a: v4i32) -> v4i32 { +pub fn lsx_vsubi_wu(a: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vsubi_wu(a, IMM5) + unsafe { __lsx_vsubi_wu(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsubi_du(a: v2i64) -> v2i64 { +pub fn lsx_vsubi_du(a: v2i64) -> v2i64 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vsubi_du(a, IMM5) + unsafe { __lsx_vsubi_du(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmax_b(a: v16i8, b: v16i8) -> v16i8 { - __lsx_vmax_b(a, b) +pub fn lsx_vmax_b(a: v16i8, b: v16i8) -> v16i8 { + unsafe { __lsx_vmax_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmax_h(a: v8i16, b: v8i16) -> v8i16 { - __lsx_vmax_h(a, b) +pub fn lsx_vmax_h(a: v8i16, b: v8i16) -> v8i16 { + unsafe { __lsx_vmax_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmax_w(a: v4i32, b: v4i32) -> v4i32 { - __lsx_vmax_w(a, b) +pub fn lsx_vmax_w(a: v4i32, b: v4i32) -> v4i32 { + unsafe { __lsx_vmax_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmax_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vmax_d(a, b) +pub fn lsx_vmax_d(a: v2i64, b: 
v2i64) -> v2i64 { + unsafe { __lsx_vmax_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaxi_b(a: v16i8) -> v16i8 { +pub fn lsx_vmaxi_b(a: v16i8) -> v16i8 { static_assert_simm_bits!(IMM_S5, 5); - __lsx_vmaxi_b(a, IMM_S5) + unsafe { __lsx_vmaxi_b(a, IMM_S5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaxi_h(a: v8i16) -> v8i16 { +pub fn lsx_vmaxi_h(a: v8i16) -> v8i16 { static_assert_simm_bits!(IMM_S5, 5); - __lsx_vmaxi_h(a, IMM_S5) + unsafe { __lsx_vmaxi_h(a, IMM_S5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaxi_w(a: v4i32) -> v4i32 { +pub fn lsx_vmaxi_w(a: v4i32) -> v4i32 { static_assert_simm_bits!(IMM_S5, 5); - __lsx_vmaxi_w(a, IMM_S5) + unsafe { __lsx_vmaxi_w(a, IMM_S5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaxi_d(a: v2i64) -> v2i64 { +pub fn lsx_vmaxi_d(a: v2i64) -> v2i64 { static_assert_simm_bits!(IMM_S5, 5); - __lsx_vmaxi_d(a, IMM_S5) + unsafe { __lsx_vmaxi_d(a, IMM_S5) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmax_bu(a: v16u8, b: v16u8) -> v16u8 { - __lsx_vmax_bu(a, b) +pub fn lsx_vmax_bu(a: v16u8, b: v16u8) -> v16u8 { + unsafe { __lsx_vmax_bu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmax_hu(a: v8u16, b: v8u16) -> v8u16 { - __lsx_vmax_hu(a, b) +pub fn lsx_vmax_hu(a: v8u16, b: v8u16) -> v8u16 { + unsafe { __lsx_vmax_hu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmax_wu(a: v4u32, b: v4u32) -> v4u32 { - __lsx_vmax_wu(a, b) +pub fn lsx_vmax_wu(a: v4u32, b: v4u32) -> v4u32 { + unsafe { __lsx_vmax_wu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmax_du(a: v2u64, b: v2u64) -> v2u64 { - __lsx_vmax_du(a, b) +pub fn lsx_vmax_du(a: v2u64, b: v2u64) -> v2u64 { + unsafe { __lsx_vmax_du(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaxi_bu(a: v16u8) -> v16u8 { +pub fn lsx_vmaxi_bu(a: v16u8) -> v16u8 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vmaxi_bu(a, IMM5) + unsafe { __lsx_vmaxi_bu(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaxi_hu(a: v8u16) -> v8u16 { +pub fn lsx_vmaxi_hu(a: v8u16) -> v8u16 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vmaxi_hu(a, IMM5) + unsafe { __lsx_vmaxi_hu(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaxi_wu(a: v4u32) -> v4u32 { +pub fn lsx_vmaxi_wu(a: v4u32) -> v4u32 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vmaxi_wu(a, IMM5) + unsafe { __lsx_vmaxi_wu(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaxi_du(a: v2u64) -> v2u64 { +pub fn lsx_vmaxi_du(a: v2u64) -> v2u64 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vmaxi_du(a, IMM5) + unsafe { __lsx_vmaxi_du(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmin_b(a: v16i8, b: v16i8) -> v16i8 { - __lsx_vmin_b(a, b) +pub fn lsx_vmin_b(a: v16i8, b: v16i8) -> v16i8 { + unsafe { __lsx_vmin_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmin_h(a: v8i16, b: v8i16) -> v8i16 { - __lsx_vmin_h(a, b) +pub fn lsx_vmin_h(a: v8i16, b: v8i16) -> v8i16 { + unsafe { __lsx_vmin_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmin_w(a: v4i32, b: v4i32) -> v4i32 { - __lsx_vmin_w(a, b) +pub fn lsx_vmin_w(a: v4i32, b: v4i32) -> v4i32 { + unsafe { __lsx_vmin_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmin_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vmin_d(a, b) +pub fn lsx_vmin_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vmin_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmini_b(a: v16i8) -> v16i8 { +pub fn lsx_vmini_b(a: v16i8) -> v16i8 { static_assert_simm_bits!(IMM_S5, 5); - __lsx_vmini_b(a, IMM_S5) + unsafe { __lsx_vmini_b(a, IMM_S5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmini_h(a: v8i16) -> v8i16 { +pub fn lsx_vmini_h(a: v8i16) -> v8i16 { static_assert_simm_bits!(IMM_S5, 5); - __lsx_vmini_h(a, IMM_S5) + unsafe { __lsx_vmini_h(a, IMM_S5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmini_w(a: v4i32) -> v4i32 { +pub fn lsx_vmini_w(a: v4i32) -> v4i32 { static_assert_simm_bits!(IMM_S5, 5); - __lsx_vmini_w(a, IMM_S5) + unsafe { __lsx_vmini_w(a, IMM_S5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmini_d(a: v2i64) -> v2i64 { +pub fn lsx_vmini_d(a: v2i64) -> v2i64 { static_assert_simm_bits!(IMM_S5, 5); - __lsx_vmini_d(a, IMM_S5) + unsafe { __lsx_vmini_d(a, IMM_S5) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmin_bu(a: v16u8, b: v16u8) -> v16u8 { - __lsx_vmin_bu(a, b) +pub fn lsx_vmin_bu(a: v16u8, b: v16u8) -> v16u8 { + unsafe { __lsx_vmin_bu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmin_hu(a: v8u16, b: v8u16) -> v8u16 { - __lsx_vmin_hu(a, b) +pub fn lsx_vmin_hu(a: v8u16, b: v8u16) -> v8u16 { + unsafe { __lsx_vmin_hu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmin_wu(a: v4u32, b: v4u32) -> v4u32 { - __lsx_vmin_wu(a, b) +pub fn lsx_vmin_wu(a: v4u32, b: v4u32) -> v4u32 { + unsafe { __lsx_vmin_wu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub 
unsafe fn lsx_vmin_du(a: v2u64, b: v2u64) -> v2u64 { - __lsx_vmin_du(a, b) +pub fn lsx_vmin_du(a: v2u64, b: v2u64) -> v2u64 { + unsafe { __lsx_vmin_du(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmini_bu(a: v16u8) -> v16u8 { +pub fn lsx_vmini_bu(a: v16u8) -> v16u8 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vmini_bu(a, IMM5) + unsafe { __lsx_vmini_bu(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmini_hu(a: v8u16) -> v8u16 { +pub fn lsx_vmini_hu(a: v8u16) -> v8u16 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vmini_hu(a, IMM5) + unsafe { __lsx_vmini_hu(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmini_wu(a: v4u32) -> v4u32 { +pub fn lsx_vmini_wu(a: v4u32) -> v4u32 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vmini_wu(a, IMM5) + unsafe { __lsx_vmini_wu(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmini_du(a: v2u64) -> v2u64 { +pub fn lsx_vmini_du(a: v2u64) -> v2u64 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vmini_du(a, IMM5) + unsafe { __lsx_vmini_du(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vseq_b(a: v16i8, b: v16i8) -> v16i8 { - __lsx_vseq_b(a, b) +pub fn lsx_vseq_b(a: v16i8, b: v16i8) -> v16i8 { + unsafe { __lsx_vseq_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vseq_h(a: v8i16, b: v8i16) -> v8i16 { - __lsx_vseq_h(a, b) +pub fn lsx_vseq_h(a: v8i16, b: v8i16) -> v8i16 { + unsafe { __lsx_vseq_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vseq_w(a: v4i32, b: v4i32) -> v4i32 { - __lsx_vseq_w(a, b) +pub fn lsx_vseq_w(a: v4i32, b: v4i32) -> v4i32 { + unsafe { __lsx_vseq_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vseq_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vseq_d(a, b) +pub fn lsx_vseq_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vseq_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vseqi_b(a: v16i8) -> v16i8 { +pub fn lsx_vseqi_b(a: v16i8) -> v16i8 { static_assert_simm_bits!(IMM_S5, 5); - __lsx_vseqi_b(a, IMM_S5) + unsafe { __lsx_vseqi_b(a, IMM_S5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vseqi_h(a: v8i16) -> v8i16 { +pub fn lsx_vseqi_h(a: v8i16) -> v8i16 { static_assert_simm_bits!(IMM_S5, 5); - __lsx_vseqi_h(a, IMM_S5) + unsafe { __lsx_vseqi_h(a, IMM_S5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vseqi_w(a: v4i32) -> v4i32 { +pub fn lsx_vseqi_w(a: v4i32) -> v4i32 { static_assert_simm_bits!(IMM_S5, 5); - __lsx_vseqi_w(a, IMM_S5) + unsafe { __lsx_vseqi_w(a, IMM_S5) } } #[inline] 
#[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vseqi_d(a: v2i64) -> v2i64 { +pub fn lsx_vseqi_d(a: v2i64) -> v2i64 { static_assert_simm_bits!(IMM_S5, 5); - __lsx_vseqi_d(a, IMM_S5) + unsafe { __lsx_vseqi_d(a, IMM_S5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vslti_b(a: v16i8) -> v16i8 { +pub fn lsx_vslti_b(a: v16i8) -> v16i8 { static_assert_simm_bits!(IMM_S5, 5); - __lsx_vslti_b(a, IMM_S5) + unsafe { __lsx_vslti_b(a, IMM_S5) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vslt_b(a: v16i8, b: v16i8) -> v16i8 { - __lsx_vslt_b(a, b) +pub fn lsx_vslt_b(a: v16i8, b: v16i8) -> v16i8 { + unsafe { __lsx_vslt_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vslt_h(a: v8i16, b: v8i16) -> v8i16 { - __lsx_vslt_h(a, b) +pub fn lsx_vslt_h(a: v8i16, b: v8i16) -> v8i16 { + unsafe { __lsx_vslt_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vslt_w(a: v4i32, b: v4i32) -> v4i32 { - __lsx_vslt_w(a, b) +pub fn lsx_vslt_w(a: v4i32, b: v4i32) -> v4i32 { + unsafe { __lsx_vslt_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vslt_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vslt_d(a, b) +pub fn lsx_vslt_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vslt_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vslti_h(a: v8i16) -> v8i16 { +pub fn lsx_vslti_h(a: v8i16) -> v8i16 { static_assert_simm_bits!(IMM_S5, 5); - __lsx_vslti_h(a, IMM_S5) + unsafe { __lsx_vslti_h(a, IMM_S5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vslti_w(a: v4i32) -> v4i32 { +pub fn lsx_vslti_w(a: v4i32) -> v4i32 { static_assert_simm_bits!(IMM_S5, 5); - __lsx_vslti_w(a, IMM_S5) + unsafe { __lsx_vslti_w(a, IMM_S5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vslti_d(a: v2i64) -> v2i64 { +pub fn lsx_vslti_d(a: v2i64) -> v2i64 { static_assert_simm_bits!(IMM_S5, 5); - __lsx_vslti_d(a, IMM_S5) + unsafe { __lsx_vslti_d(a, IMM_S5) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vslt_bu(a: v16u8, b: v16u8) -> v16i8 { - __lsx_vslt_bu(a, b) +pub fn lsx_vslt_bu(a: v16u8, b: v16u8) -> v16i8 { + unsafe { __lsx_vslt_bu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vslt_hu(a: v8u16, b: v8u16) -> v8i16 { - __lsx_vslt_hu(a, b) +pub fn lsx_vslt_hu(a: v8u16, b: v8u16) -> v8i16 { + unsafe { __lsx_vslt_hu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vslt_wu(a: v4u32, b: v4u32) -> v4i32 { - __lsx_vslt_wu(a, b) +pub fn lsx_vslt_wu(a: v4u32, b: v4u32) -> v4i32 { + unsafe { __lsx_vslt_wu(a, b) } } #[inline] 
#[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vslt_du(a: v2u64, b: v2u64) -> v2i64 { - __lsx_vslt_du(a, b) +pub fn lsx_vslt_du(a: v2u64, b: v2u64) -> v2i64 { + unsafe { __lsx_vslt_du(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vslti_bu(a: v16u8) -> v16i8 { +pub fn lsx_vslti_bu(a: v16u8) -> v16i8 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vslti_bu(a, IMM5) + unsafe { __lsx_vslti_bu(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vslti_hu(a: v8u16) -> v8i16 { +pub fn lsx_vslti_hu(a: v8u16) -> v8i16 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vslti_hu(a, IMM5) + unsafe { __lsx_vslti_hu(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vslti_wu(a: v4u32) -> v4i32 { +pub fn lsx_vslti_wu(a: v4u32) -> v4i32 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vslti_wu(a, IMM5) + unsafe { __lsx_vslti_wu(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vslti_du(a: v2u64) -> v2i64 { +pub fn lsx_vslti_du(a: v2u64) -> v2i64 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vslti_du(a, IMM5) + unsafe { __lsx_vslti_du(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsle_b(a: v16i8, b: v16i8) -> v16i8 { - __lsx_vsle_b(a, b) +pub fn lsx_vsle_b(a: v16i8, b: v16i8) -> v16i8 { + unsafe { __lsx_vsle_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsle_h(a: v8i16, b: v8i16) -> v8i16 { - __lsx_vsle_h(a, b) +pub fn lsx_vsle_h(a: v8i16, b: v8i16) -> v8i16 { + unsafe { __lsx_vsle_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsle_w(a: v4i32, b: v4i32) -> v4i32 { - __lsx_vsle_w(a, b) +pub fn lsx_vsle_w(a: v4i32, b: v4i32) -> v4i32 { + unsafe { __lsx_vsle_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsle_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vsle_d(a, b) +pub fn lsx_vsle_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vsle_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vslei_b(a: v16i8) -> v16i8 { +pub fn lsx_vslei_b(a: v16i8) -> v16i8 { static_assert_simm_bits!(IMM_S5, 5); - __lsx_vslei_b(a, IMM_S5) + unsafe { __lsx_vslei_b(a, IMM_S5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vslei_h(a: v8i16) -> v8i16 { +pub fn lsx_vslei_h(a: v8i16) -> v8i16 { static_assert_simm_bits!(IMM_S5, 5); - __lsx_vslei_h(a, IMM_S5) + unsafe { __lsx_vslei_h(a, IMM_S5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vslei_w(a: v4i32) -> v4i32 { +pub fn lsx_vslei_w(a: v4i32) -> v4i32 { 
static_assert_simm_bits!(IMM_S5, 5); - __lsx_vslei_w(a, IMM_S5) + unsafe { __lsx_vslei_w(a, IMM_S5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vslei_d(a: v2i64) -> v2i64 { +pub fn lsx_vslei_d(a: v2i64) -> v2i64 { static_assert_simm_bits!(IMM_S5, 5); - __lsx_vslei_d(a, IMM_S5) + unsafe { __lsx_vslei_d(a, IMM_S5) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsle_bu(a: v16u8, b: v16u8) -> v16i8 { - __lsx_vsle_bu(a, b) +pub fn lsx_vsle_bu(a: v16u8, b: v16u8) -> v16i8 { + unsafe { __lsx_vsle_bu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsle_hu(a: v8u16, b: v8u16) -> v8i16 { - __lsx_vsle_hu(a, b) +pub fn lsx_vsle_hu(a: v8u16, b: v8u16) -> v8i16 { + unsafe { __lsx_vsle_hu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsle_wu(a: v4u32, b: v4u32) -> v4i32 { - __lsx_vsle_wu(a, b) +pub fn lsx_vsle_wu(a: v4u32, b: v4u32) -> v4i32 { + unsafe { __lsx_vsle_wu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsle_du(a: v2u64, b: v2u64) -> v2i64 { - __lsx_vsle_du(a, b) +pub fn lsx_vsle_du(a: v2u64, b: v2u64) -> v2i64 { + unsafe { __lsx_vsle_du(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vslei_bu(a: v16u8) -> v16i8 { +pub fn lsx_vslei_bu(a: v16u8) -> v16i8 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vslei_bu(a, IMM5) + unsafe { __lsx_vslei_bu(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vslei_hu(a: v8u16) -> v8i16 { +pub fn lsx_vslei_hu(a: v8u16) -> v8i16 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vslei_hu(a, IMM5) + unsafe { __lsx_vslei_hu(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vslei_wu(a: v4u32) -> v4i32 { +pub fn lsx_vslei_wu(a: v4u32) -> v4i32 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vslei_wu(a, IMM5) + unsafe { __lsx_vslei_wu(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vslei_du(a: v2u64) -> v2i64 { +pub fn lsx_vslei_du(a: v2u64) -> v2i64 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vslei_du(a, IMM5) + unsafe { __lsx_vslei_du(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsat_b(a: v16i8) -> v16i8 { +pub fn lsx_vsat_b(a: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM3, 3); - __lsx_vsat_b(a, IMM3) + unsafe { __lsx_vsat_b(a, IMM3) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsat_h(a: v8i16) -> v8i16 { +pub fn lsx_vsat_h(a: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM4, 4); - __lsx_vsat_h(a, IMM4) + unsafe { __lsx_vsat_h(a, IMM4) } } #[inline] #[target_feature(enable = "lsx")] 
#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsat_w(a: v4i32) -> v4i32 { +pub fn lsx_vsat_w(a: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vsat_w(a, IMM5) + unsafe { __lsx_vsat_w(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsat_d(a: v2i64) -> v2i64 { +pub fn lsx_vsat_d(a: v2i64) -> v2i64 { static_assert_uimm_bits!(IMM6, 6); - __lsx_vsat_d(a, IMM6) + unsafe { __lsx_vsat_d(a, IMM6) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsat_bu(a: v16u8) -> v16u8 { +pub fn lsx_vsat_bu(a: v16u8) -> v16u8 { static_assert_uimm_bits!(IMM3, 3); - __lsx_vsat_bu(a, IMM3) + unsafe { __lsx_vsat_bu(a, IMM3) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsat_hu(a: v8u16) -> v8u16 { +pub fn lsx_vsat_hu(a: v8u16) -> v8u16 { static_assert_uimm_bits!(IMM4, 4); - __lsx_vsat_hu(a, IMM4) + unsafe { __lsx_vsat_hu(a, IMM4) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsat_wu(a: v4u32) -> v4u32 { +pub fn lsx_vsat_wu(a: v4u32) -> v4u32 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vsat_wu(a, IMM5) + unsafe { __lsx_vsat_wu(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsat_du(a: v2u64) -> v2u64 { +pub fn lsx_vsat_du(a: v2u64) -> v2u64 { static_assert_uimm_bits!(IMM6, 6); - __lsx_vsat_du(a, IMM6) + unsafe { __lsx_vsat_du(a, IMM6) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vadda_b(a: v16i8, b: v16i8) -> v16i8 { - __lsx_vadda_b(a, b) +pub fn lsx_vadda_b(a: v16i8, b: v16i8) -> v16i8 { + unsafe { __lsx_vadda_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vadda_h(a: v8i16, b: v8i16) -> v8i16 { - __lsx_vadda_h(a, b) +pub fn lsx_vadda_h(a: v8i16, b: v8i16) -> v8i16 { + unsafe { __lsx_vadda_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vadda_w(a: v4i32, b: v4i32) -> v4i32 { - __lsx_vadda_w(a, b) +pub fn lsx_vadda_w(a: v4i32, b: v4i32) -> v4i32 { + unsafe { __lsx_vadda_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vadda_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vadda_d(a, b) +pub fn lsx_vadda_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vadda_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsadd_b(a: v16i8, b: v16i8) -> v16i8 { - __lsx_vsadd_b(a, b) +pub fn lsx_vsadd_b(a: v16i8, b: v16i8) -> v16i8 { + unsafe { __lsx_vsadd_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsadd_h(a: v8i16, b: v8i16) -> v8i16 { - __lsx_vsadd_h(a, b) +pub fn lsx_vsadd_h(a: v8i16, b: v8i16) -> v8i16 { + unsafe { __lsx_vsadd_h(a, b) } } #[inline] 
#[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsadd_w(a: v4i32, b: v4i32) -> v4i32 { - __lsx_vsadd_w(a, b) +pub fn lsx_vsadd_w(a: v4i32, b: v4i32) -> v4i32 { + unsafe { __lsx_vsadd_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsadd_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vsadd_d(a, b) +pub fn lsx_vsadd_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vsadd_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsadd_bu(a: v16u8, b: v16u8) -> v16u8 { - __lsx_vsadd_bu(a, b) +pub fn lsx_vsadd_bu(a: v16u8, b: v16u8) -> v16u8 { + unsafe { __lsx_vsadd_bu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsadd_hu(a: v8u16, b: v8u16) -> v8u16 { - __lsx_vsadd_hu(a, b) +pub fn lsx_vsadd_hu(a: v8u16, b: v8u16) -> v8u16 { + unsafe { __lsx_vsadd_hu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsadd_wu(a: v4u32, b: v4u32) -> v4u32 { - __lsx_vsadd_wu(a, b) +pub fn lsx_vsadd_wu(a: v4u32, b: v4u32) -> v4u32 { + unsafe { __lsx_vsadd_wu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsadd_du(a: v2u64, b: v2u64) -> v2u64 { - __lsx_vsadd_du(a, b) +pub fn lsx_vsadd_du(a: v2u64, b: v2u64) -> v2u64 { + unsafe { __lsx_vsadd_du(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vavg_b(a: v16i8, b: v16i8) -> v16i8 { - __lsx_vavg_b(a, b) +pub fn lsx_vavg_b(a: v16i8, b: v16i8) -> v16i8 { + unsafe { __lsx_vavg_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vavg_h(a: v8i16, b: v8i16) -> v8i16 { - __lsx_vavg_h(a, b) +pub fn lsx_vavg_h(a: v8i16, b: v8i16) -> v8i16 { + unsafe { __lsx_vavg_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vavg_w(a: v4i32, b: v4i32) -> v4i32 { - __lsx_vavg_w(a, b) +pub fn lsx_vavg_w(a: v4i32, b: v4i32) -> v4i32 { + unsafe { __lsx_vavg_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vavg_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vavg_d(a, b) +pub fn lsx_vavg_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vavg_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vavg_bu(a: v16u8, b: v16u8) -> v16u8 { - __lsx_vavg_bu(a, b) +pub fn lsx_vavg_bu(a: v16u8, b: v16u8) -> v16u8 { + unsafe { __lsx_vavg_bu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vavg_hu(a: v8u16, b: v8u16) -> v8u16 { - __lsx_vavg_hu(a, b) +pub fn lsx_vavg_hu(a: v8u16, b: v8u16) -> v8u16 { + unsafe { __lsx_vavg_hu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vavg_wu(a: v4u32, b: v4u32) -> v4u32 { - __lsx_vavg_wu(a, b) +pub fn lsx_vavg_wu(a: v4u32, b: v4u32) -> v4u32 { + unsafe { __lsx_vavg_wu(a, b) } } #[inline] #[target_feature(enable = 
"lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vavg_du(a: v2u64, b: v2u64) -> v2u64 { - __lsx_vavg_du(a, b) +pub fn lsx_vavg_du(a: v2u64, b: v2u64) -> v2u64 { + unsafe { __lsx_vavg_du(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vavgr_b(a: v16i8, b: v16i8) -> v16i8 { - __lsx_vavgr_b(a, b) +pub fn lsx_vavgr_b(a: v16i8, b: v16i8) -> v16i8 { + unsafe { __lsx_vavgr_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vavgr_h(a: v8i16, b: v8i16) -> v8i16 { - __lsx_vavgr_h(a, b) +pub fn lsx_vavgr_h(a: v8i16, b: v8i16) -> v8i16 { + unsafe { __lsx_vavgr_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vavgr_w(a: v4i32, b: v4i32) -> v4i32 { - __lsx_vavgr_w(a, b) +pub fn lsx_vavgr_w(a: v4i32, b: v4i32) -> v4i32 { + unsafe { __lsx_vavgr_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vavgr_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vavgr_d(a, b) +pub fn lsx_vavgr_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vavgr_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vavgr_bu(a: v16u8, b: v16u8) -> v16u8 { - __lsx_vavgr_bu(a, b) +pub fn lsx_vavgr_bu(a: v16u8, b: v16u8) -> v16u8 { + unsafe { __lsx_vavgr_bu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vavgr_hu(a: v8u16, b: v8u16) -> v8u16 { - __lsx_vavgr_hu(a, b) +pub fn lsx_vavgr_hu(a: v8u16, b: v8u16) -> v8u16 { + unsafe { __lsx_vavgr_hu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vavgr_wu(a: v4u32, b: v4u32) -> v4u32 { - __lsx_vavgr_wu(a, b) +pub fn lsx_vavgr_wu(a: v4u32, b: v4u32) -> v4u32 { + unsafe { __lsx_vavgr_wu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vavgr_du(a: v2u64, b: v2u64) -> v2u64 { - __lsx_vavgr_du(a, b) +pub fn lsx_vavgr_du(a: v2u64, b: v2u64) -> v2u64 { + unsafe { __lsx_vavgr_du(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssub_b(a: v16i8, b: v16i8) -> v16i8 { - __lsx_vssub_b(a, b) +pub fn lsx_vssub_b(a: v16i8, b: v16i8) -> v16i8 { + unsafe { __lsx_vssub_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssub_h(a: v8i16, b: v8i16) -> v8i16 { - __lsx_vssub_h(a, b) +pub fn lsx_vssub_h(a: v8i16, b: v8i16) -> v8i16 { + unsafe { __lsx_vssub_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssub_w(a: v4i32, b: v4i32) -> v4i32 { - __lsx_vssub_w(a, b) +pub fn lsx_vssub_w(a: v4i32, b: v4i32) -> v4i32 { + unsafe { __lsx_vssub_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssub_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vssub_d(a, b) +pub fn lsx_vssub_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vssub_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssub_bu(a: v16u8, b: v16u8) -> v16u8 { - __lsx_vssub_bu(a, b) +pub fn lsx_vssub_bu(a: v16u8, b: v16u8) -> v16u8 { + unsafe { __lsx_vssub_bu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssub_hu(a: v8u16, b: v8u16) -> v8u16 { - __lsx_vssub_hu(a, b) +pub fn lsx_vssub_hu(a: v8u16, b: v8u16) -> v8u16 { + unsafe { __lsx_vssub_hu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssub_wu(a: v4u32, b: v4u32) -> v4u32 { - __lsx_vssub_wu(a, b) +pub fn lsx_vssub_wu(a: v4u32, b: v4u32) -> v4u32 { + unsafe { __lsx_vssub_wu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssub_du(a: v2u64, b: v2u64) -> v2u64 { - __lsx_vssub_du(a, b) +pub fn lsx_vssub_du(a: v2u64, b: v2u64) -> v2u64 { + unsafe { __lsx_vssub_du(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vabsd_b(a: v16i8, b: v16i8) -> v16i8 { - __lsx_vabsd_b(a, b) +pub fn lsx_vabsd_b(a: v16i8, b: v16i8) -> v16i8 { + unsafe { __lsx_vabsd_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vabsd_h(a: v8i16, b: v8i16) -> v8i16 { - __lsx_vabsd_h(a, b) +pub fn lsx_vabsd_h(a: v8i16, b: v8i16) -> v8i16 { + unsafe { __lsx_vabsd_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vabsd_w(a: v4i32, b: v4i32) -> v4i32 { - __lsx_vabsd_w(a, b) +pub fn lsx_vabsd_w(a: v4i32, b: v4i32) -> v4i32 { + unsafe { __lsx_vabsd_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vabsd_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vabsd_d(a, b) +pub fn lsx_vabsd_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vabsd_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vabsd_bu(a: v16u8, b: v16u8) -> v16u8 { - __lsx_vabsd_bu(a, b) +pub fn lsx_vabsd_bu(a: v16u8, b: v16u8) -> v16u8 { + unsafe { __lsx_vabsd_bu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vabsd_hu(a: v8u16, b: v8u16) -> v8u16 { - __lsx_vabsd_hu(a, b) +pub fn lsx_vabsd_hu(a: v8u16, b: v8u16) -> v8u16 { + unsafe { __lsx_vabsd_hu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vabsd_wu(a: v4u32, b: v4u32) -> v4u32 { - __lsx_vabsd_wu(a, b) +pub fn lsx_vabsd_wu(a: v4u32, b: v4u32) -> v4u32 { + unsafe { __lsx_vabsd_wu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vabsd_du(a: v2u64, b: v2u64) -> v2u64 { - __lsx_vabsd_du(a, b) +pub fn lsx_vabsd_du(a: v2u64, b: v2u64) -> v2u64 { + unsafe { __lsx_vabsd_du(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmul_b(a: v16i8, b: v16i8) -> v16i8 { - __lsx_vmul_b(a, b) +pub fn lsx_vmul_b(a: v16i8, b: v16i8) -> v16i8 { + unsafe { __lsx_vmul_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmul_h(a: v8i16, b: v8i16) -> v8i16 { - __lsx_vmul_h(a, b) +pub fn lsx_vmul_h(a: v8i16, b: v8i16) -> v8i16 { + unsafe { __lsx_vmul_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmul_w(a: v4i32, b: v4i32) -> v4i32 { - __lsx_vmul_w(a, b) +pub fn lsx_vmul_w(a: v4i32, b: v4i32) -> v4i32 { + unsafe { __lsx_vmul_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmul_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vmul_d(a, b) +pub fn lsx_vmul_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vmul_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmadd_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8 { - __lsx_vmadd_b(a, b, c) +pub fn lsx_vmadd_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8 { + unsafe { __lsx_vmadd_b(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmadd_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { - __lsx_vmadd_h(a, b, c) +pub fn lsx_vmadd_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { + unsafe { __lsx_vmadd_h(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmadd_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 { - __lsx_vmadd_w(a, b, c) +pub fn lsx_vmadd_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 { + unsafe { __lsx_vmadd_w(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmadd_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 { - __lsx_vmadd_d(a, b, c) +pub fn lsx_vmadd_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 { + unsafe { __lsx_vmadd_d(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmsub_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8 { - __lsx_vmsub_b(a, b, c) +pub fn lsx_vmsub_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8 { + unsafe { __lsx_vmsub_b(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmsub_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { - __lsx_vmsub_h(a, b, c) +pub fn lsx_vmsub_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { + unsafe { __lsx_vmsub_h(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmsub_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 { - __lsx_vmsub_w(a, b, c) +pub fn lsx_vmsub_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 { + unsafe { __lsx_vmsub_w(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmsub_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 { - __lsx_vmsub_d(a, b, c) +pub fn lsx_vmsub_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 { + unsafe { __lsx_vmsub_d(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vdiv_b(a: v16i8, b: v16i8) -> v16i8 { - __lsx_vdiv_b(a, b) +pub fn lsx_vdiv_b(a: v16i8, b: v16i8) -> v16i8 { + unsafe { __lsx_vdiv_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vdiv_h(a: v8i16, b: v8i16) -> 
v8i16 { - __lsx_vdiv_h(a, b) +pub fn lsx_vdiv_h(a: v8i16, b: v8i16) -> v8i16 { + unsafe { __lsx_vdiv_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vdiv_w(a: v4i32, b: v4i32) -> v4i32 { - __lsx_vdiv_w(a, b) +pub fn lsx_vdiv_w(a: v4i32, b: v4i32) -> v4i32 { + unsafe { __lsx_vdiv_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vdiv_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vdiv_d(a, b) +pub fn lsx_vdiv_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vdiv_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vdiv_bu(a: v16u8, b: v16u8) -> v16u8 { - __lsx_vdiv_bu(a, b) +pub fn lsx_vdiv_bu(a: v16u8, b: v16u8) -> v16u8 { + unsafe { __lsx_vdiv_bu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vdiv_hu(a: v8u16, b: v8u16) -> v8u16 { - __lsx_vdiv_hu(a, b) +pub fn lsx_vdiv_hu(a: v8u16, b: v8u16) -> v8u16 { + unsafe { __lsx_vdiv_hu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vdiv_wu(a: v4u32, b: v4u32) -> v4u32 { - __lsx_vdiv_wu(a, b) +pub fn lsx_vdiv_wu(a: v4u32, b: v4u32) -> v4u32 { + unsafe { __lsx_vdiv_wu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vdiv_du(a: v2u64, b: v2u64) -> v2u64 { - __lsx_vdiv_du(a, b) +pub fn lsx_vdiv_du(a: v2u64, b: v2u64) -> v2u64 { + unsafe { __lsx_vdiv_du(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vhaddw_h_b(a: v16i8, b: v16i8) -> v8i16 { - __lsx_vhaddw_h_b(a, b) +pub fn lsx_vhaddw_h_b(a: v16i8, b: v16i8) -> v8i16 { + unsafe { __lsx_vhaddw_h_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vhaddw_w_h(a: v8i16, b: v8i16) -> v4i32 { - __lsx_vhaddw_w_h(a, b) +pub fn lsx_vhaddw_w_h(a: v8i16, b: v8i16) -> v4i32 { + unsafe { __lsx_vhaddw_w_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vhaddw_d_w(a: v4i32, b: v4i32) -> v2i64 { - __lsx_vhaddw_d_w(a, b) +pub fn lsx_vhaddw_d_w(a: v4i32, b: v4i32) -> v2i64 { + unsafe { __lsx_vhaddw_d_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vhaddw_hu_bu(a: v16u8, b: v16u8) -> v8u16 { - __lsx_vhaddw_hu_bu(a, b) +pub fn lsx_vhaddw_hu_bu(a: v16u8, b: v16u8) -> v8u16 { + unsafe { __lsx_vhaddw_hu_bu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vhaddw_wu_hu(a: v8u16, b: v8u16) -> v4u32 { - __lsx_vhaddw_wu_hu(a, b) +pub fn lsx_vhaddw_wu_hu(a: v8u16, b: v8u16) -> v4u32 { + unsafe { __lsx_vhaddw_wu_hu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vhaddw_du_wu(a: v4u32, b: v4u32) -> v2u64 { - __lsx_vhaddw_du_wu(a, b) +pub fn lsx_vhaddw_du_wu(a: v4u32, b: v4u32) -> v2u64 { + unsafe { __lsx_vhaddw_du_wu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = 
"117427")] -pub unsafe fn lsx_vhsubw_h_b(a: v16i8, b: v16i8) -> v8i16 { - __lsx_vhsubw_h_b(a, b) +pub fn lsx_vhsubw_h_b(a: v16i8, b: v16i8) -> v8i16 { + unsafe { __lsx_vhsubw_h_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vhsubw_w_h(a: v8i16, b: v8i16) -> v4i32 { - __lsx_vhsubw_w_h(a, b) +pub fn lsx_vhsubw_w_h(a: v8i16, b: v8i16) -> v4i32 { + unsafe { __lsx_vhsubw_w_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vhsubw_d_w(a: v4i32, b: v4i32) -> v2i64 { - __lsx_vhsubw_d_w(a, b) +pub fn lsx_vhsubw_d_w(a: v4i32, b: v4i32) -> v2i64 { + unsafe { __lsx_vhsubw_d_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vhsubw_hu_bu(a: v16u8, b: v16u8) -> v8i16 { - __lsx_vhsubw_hu_bu(a, b) +pub fn lsx_vhsubw_hu_bu(a: v16u8, b: v16u8) -> v8i16 { + unsafe { __lsx_vhsubw_hu_bu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vhsubw_wu_hu(a: v8u16, b: v8u16) -> v4i32 { - __lsx_vhsubw_wu_hu(a, b) +pub fn lsx_vhsubw_wu_hu(a: v8u16, b: v8u16) -> v4i32 { + unsafe { __lsx_vhsubw_wu_hu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vhsubw_du_wu(a: v4u32, b: v4u32) -> v2i64 { - __lsx_vhsubw_du_wu(a, b) +pub fn lsx_vhsubw_du_wu(a: v4u32, b: v4u32) -> v2i64 { + unsafe { __lsx_vhsubw_du_wu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmod_b(a: v16i8, b: v16i8) -> v16i8 { - __lsx_vmod_b(a, b) +pub fn lsx_vmod_b(a: v16i8, b: v16i8) -> v16i8 { + unsafe { __lsx_vmod_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmod_h(a: v8i16, b: v8i16) -> v8i16 { - __lsx_vmod_h(a, b) +pub fn lsx_vmod_h(a: v8i16, b: v8i16) -> v8i16 { + unsafe { __lsx_vmod_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmod_w(a: v4i32, b: v4i32) -> v4i32 { - __lsx_vmod_w(a, b) +pub fn lsx_vmod_w(a: v4i32, b: v4i32) -> v4i32 { + unsafe { __lsx_vmod_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmod_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vmod_d(a, b) +pub fn lsx_vmod_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vmod_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmod_bu(a: v16u8, b: v16u8) -> v16u8 { - __lsx_vmod_bu(a, b) +pub fn lsx_vmod_bu(a: v16u8, b: v16u8) -> v16u8 { + unsafe { __lsx_vmod_bu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmod_hu(a: v8u16, b: v8u16) -> v8u16 { - __lsx_vmod_hu(a, b) +pub fn lsx_vmod_hu(a: v8u16, b: v8u16) -> v8u16 { + unsafe { __lsx_vmod_hu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmod_wu(a: v4u32, b: v4u32) -> v4u32 { - __lsx_vmod_wu(a, b) +pub fn lsx_vmod_wu(a: v4u32, b: v4u32) -> v4u32 { + unsafe { __lsx_vmod_wu(a, b) } } #[inline] #[target_feature(enable = "lsx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmod_du(a: v2u64, b: v2u64) -> v2u64 { - __lsx_vmod_du(a, b) +pub fn lsx_vmod_du(a: v2u64, b: v2u64) -> v2u64 { + unsafe { __lsx_vmod_du(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vreplve_b(a: v16i8, b: i32) -> v16i8 { - __lsx_vreplve_b(a, b) +pub fn lsx_vreplve_b(a: v16i8, b: i32) -> v16i8 { + unsafe { __lsx_vreplve_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vreplve_h(a: v8i16, b: i32) -> v8i16 { - __lsx_vreplve_h(a, b) +pub fn lsx_vreplve_h(a: v8i16, b: i32) -> v8i16 { + unsafe { __lsx_vreplve_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vreplve_w(a: v4i32, b: i32) -> v4i32 { - __lsx_vreplve_w(a, b) +pub fn lsx_vreplve_w(a: v4i32, b: i32) -> v4i32 { + unsafe { __lsx_vreplve_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vreplve_d(a: v2i64, b: i32) -> v2i64 { - __lsx_vreplve_d(a, b) +pub fn lsx_vreplve_d(a: v2i64, b: i32) -> v2i64 { + unsafe { __lsx_vreplve_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vreplvei_b(a: v16i8) -> v16i8 { +pub fn lsx_vreplvei_b(a: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM4, 4); - __lsx_vreplvei_b(a, IMM4) + unsafe { __lsx_vreplvei_b(a, IMM4) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vreplvei_h(a: v8i16) -> v8i16 { +pub fn lsx_vreplvei_h(a: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM3, 3); - __lsx_vreplvei_h(a, IMM3) + unsafe { __lsx_vreplvei_h(a, IMM3) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vreplvei_w(a: v4i32) -> v4i32 { +pub fn lsx_vreplvei_w(a: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM2, 2); - __lsx_vreplvei_w(a, IMM2) + unsafe { __lsx_vreplvei_w(a, IMM2) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vreplvei_d(a: v2i64) -> v2i64 { +pub fn lsx_vreplvei_d(a: v2i64) -> v2i64 { static_assert_uimm_bits!(IMM1, 1); - __lsx_vreplvei_d(a, IMM1) + unsafe { __lsx_vreplvei_d(a, IMM1) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vpickev_b(a: v16i8, b: v16i8) -> v16i8 { - __lsx_vpickev_b(a, b) +pub fn lsx_vpickev_b(a: v16i8, b: v16i8) -> v16i8 { + unsafe { __lsx_vpickev_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vpickev_h(a: v8i16, b: v8i16) -> v8i16 { - __lsx_vpickev_h(a, b) +pub fn lsx_vpickev_h(a: v8i16, b: v8i16) -> v8i16 { + unsafe { __lsx_vpickev_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vpickev_w(a: v4i32, b: v4i32) -> v4i32 { - __lsx_vpickev_w(a, b) +pub fn lsx_vpickev_w(a: v4i32, b: v4i32) -> v4i32 { + unsafe { __lsx_vpickev_w(a, b) } } #[inline] #[target_feature(enable = 
"lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vpickev_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vpickev_d(a, b) +pub fn lsx_vpickev_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vpickev_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vpickod_b(a: v16i8, b: v16i8) -> v16i8 { - __lsx_vpickod_b(a, b) +pub fn lsx_vpickod_b(a: v16i8, b: v16i8) -> v16i8 { + unsafe { __lsx_vpickod_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vpickod_h(a: v8i16, b: v8i16) -> v8i16 { - __lsx_vpickod_h(a, b) +pub fn lsx_vpickod_h(a: v8i16, b: v8i16) -> v8i16 { + unsafe { __lsx_vpickod_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vpickod_w(a: v4i32, b: v4i32) -> v4i32 { - __lsx_vpickod_w(a, b) +pub fn lsx_vpickod_w(a: v4i32, b: v4i32) -> v4i32 { + unsafe { __lsx_vpickod_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vpickod_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vpickod_d(a, b) +pub fn lsx_vpickod_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vpickod_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vilvh_b(a: v16i8, b: v16i8) -> v16i8 { - __lsx_vilvh_b(a, b) +pub fn lsx_vilvh_b(a: v16i8, b: v16i8) -> v16i8 { + unsafe { __lsx_vilvh_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vilvh_h(a: v8i16, b: v8i16) -> v8i16 { - __lsx_vilvh_h(a, b) +pub fn lsx_vilvh_h(a: v8i16, b: v8i16) -> v8i16 { + unsafe { __lsx_vilvh_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vilvh_w(a: v4i32, b: v4i32) -> v4i32 { - __lsx_vilvh_w(a, b) +pub fn lsx_vilvh_w(a: v4i32, b: v4i32) -> v4i32 { + unsafe { __lsx_vilvh_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vilvh_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vilvh_d(a, b) +pub fn lsx_vilvh_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vilvh_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vilvl_b(a: v16i8, b: v16i8) -> v16i8 { - __lsx_vilvl_b(a, b) +pub fn lsx_vilvl_b(a: v16i8, b: v16i8) -> v16i8 { + unsafe { __lsx_vilvl_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vilvl_h(a: v8i16, b: v8i16) -> v8i16 { - __lsx_vilvl_h(a, b) +pub fn lsx_vilvl_h(a: v8i16, b: v8i16) -> v8i16 { + unsafe { __lsx_vilvl_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vilvl_w(a: v4i32, b: v4i32) -> v4i32 { - __lsx_vilvl_w(a, b) +pub fn lsx_vilvl_w(a: v4i32, b: v4i32) -> v4i32 { + unsafe { __lsx_vilvl_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vilvl_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vilvl_d(a, b) +pub fn lsx_vilvl_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vilvl_d(a, b) } } #[inline] 
#[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vpackev_b(a: v16i8, b: v16i8) -> v16i8 { - __lsx_vpackev_b(a, b) +pub fn lsx_vpackev_b(a: v16i8, b: v16i8) -> v16i8 { + unsafe { __lsx_vpackev_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vpackev_h(a: v8i16, b: v8i16) -> v8i16 { - __lsx_vpackev_h(a, b) +pub fn lsx_vpackev_h(a: v8i16, b: v8i16) -> v8i16 { + unsafe { __lsx_vpackev_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vpackev_w(a: v4i32, b: v4i32) -> v4i32 { - __lsx_vpackev_w(a, b) +pub fn lsx_vpackev_w(a: v4i32, b: v4i32) -> v4i32 { + unsafe { __lsx_vpackev_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vpackev_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vpackev_d(a, b) +pub fn lsx_vpackev_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vpackev_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vpackod_b(a: v16i8, b: v16i8) -> v16i8 { - __lsx_vpackod_b(a, b) +pub fn lsx_vpackod_b(a: v16i8, b: v16i8) -> v16i8 { + unsafe { __lsx_vpackod_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vpackod_h(a: v8i16, b: v8i16) -> v8i16 { - __lsx_vpackod_h(a, b) +pub fn lsx_vpackod_h(a: v8i16, b: v8i16) -> v8i16 { + unsafe { __lsx_vpackod_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vpackod_w(a: v4i32, b: v4i32) -> v4i32 { - __lsx_vpackod_w(a, b) +pub fn lsx_vpackod_w(a: v4i32, b: v4i32) -> v4i32 { + unsafe { __lsx_vpackod_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vpackod_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vpackod_d(a, b) +pub fn lsx_vpackod_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vpackod_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vshuf_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { - __lsx_vshuf_h(a, b, c) +pub fn lsx_vshuf_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { + unsafe { __lsx_vshuf_h(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vshuf_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 { - __lsx_vshuf_w(a, b, c) +pub fn lsx_vshuf_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 { + unsafe { __lsx_vshuf_w(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vshuf_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 { - __lsx_vshuf_d(a, b, c) +pub fn lsx_vshuf_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 { + unsafe { __lsx_vshuf_d(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vand_v(a: v16u8, b: v16u8) -> v16u8 { - __lsx_vand_v(a, b) +pub fn lsx_vand_v(a: v16u8, b: v16u8) -> v16u8 { + unsafe { __lsx_vand_v(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vandi_b(a: 
v16u8) -> v16u8 { +pub fn lsx_vandi_b(a: v16u8) -> v16u8 { static_assert_uimm_bits!(IMM8, 8); - __lsx_vandi_b(a, IMM8) + unsafe { __lsx_vandi_b(a, IMM8) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vor_v(a: v16u8, b: v16u8) -> v16u8 { - __lsx_vor_v(a, b) +pub fn lsx_vor_v(a: v16u8, b: v16u8) -> v16u8 { + unsafe { __lsx_vor_v(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vori_b(a: v16u8) -> v16u8 { +pub fn lsx_vori_b(a: v16u8) -> v16u8 { static_assert_uimm_bits!(IMM8, 8); - __lsx_vori_b(a, IMM8) + unsafe { __lsx_vori_b(a, IMM8) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vnor_v(a: v16u8, b: v16u8) -> v16u8 { - __lsx_vnor_v(a, b) +pub fn lsx_vnor_v(a: v16u8, b: v16u8) -> v16u8 { + unsafe { __lsx_vnor_v(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vnori_b(a: v16u8) -> v16u8 { +pub fn lsx_vnori_b(a: v16u8) -> v16u8 { static_assert_uimm_bits!(IMM8, 8); - __lsx_vnori_b(a, IMM8) + unsafe { __lsx_vnori_b(a, IMM8) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vxor_v(a: v16u8, b: v16u8) -> v16u8 { - __lsx_vxor_v(a, b) +pub fn lsx_vxor_v(a: v16u8, b: v16u8) -> v16u8 { + unsafe { __lsx_vxor_v(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vxori_b(a: v16u8) -> v16u8 { +pub fn lsx_vxori_b(a: v16u8) -> v16u8 { static_assert_uimm_bits!(IMM8, 8); - __lsx_vxori_b(a, IMM8) + unsafe { __lsx_vxori_b(a, IMM8) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vbitsel_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8 { - __lsx_vbitsel_v(a, b, c) +pub fn lsx_vbitsel_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8 { + unsafe { __lsx_vbitsel_v(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vbitseli_b(a: v16u8, b: v16u8) -> v16u8 { +pub fn lsx_vbitseli_b(a: v16u8, b: v16u8) -> v16u8 { static_assert_uimm_bits!(IMM8, 8); - __lsx_vbitseli_b(a, b, IMM8) + unsafe { __lsx_vbitseli_b(a, b, IMM8) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vshuf4i_b(a: v16i8) -> v16i8 { +pub fn lsx_vshuf4i_b(a: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM8, 8); - __lsx_vshuf4i_b(a, IMM8) + unsafe { __lsx_vshuf4i_b(a, IMM8) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vshuf4i_h(a: v8i16) -> v8i16 { +pub fn lsx_vshuf4i_h(a: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM8, 8); - __lsx_vshuf4i_h(a, IMM8) + unsafe { __lsx_vshuf4i_h(a, IMM8) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vshuf4i_w(a: v4i32) -> v4i32 { +pub fn lsx_vshuf4i_w(a: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM8, 8); - __lsx_vshuf4i_w(a, 
IMM8) + unsafe { __lsx_vshuf4i_w(a, IMM8) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vreplgr2vr_b(a: i32) -> v16i8 { - __lsx_vreplgr2vr_b(a) +pub fn lsx_vreplgr2vr_b(a: i32) -> v16i8 { + unsafe { __lsx_vreplgr2vr_b(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vreplgr2vr_h(a: i32) -> v8i16 { - __lsx_vreplgr2vr_h(a) +pub fn lsx_vreplgr2vr_h(a: i32) -> v8i16 { + unsafe { __lsx_vreplgr2vr_h(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vreplgr2vr_w(a: i32) -> v4i32 { - __lsx_vreplgr2vr_w(a) +pub fn lsx_vreplgr2vr_w(a: i32) -> v4i32 { + unsafe { __lsx_vreplgr2vr_w(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vreplgr2vr_d(a: i64) -> v2i64 { - __lsx_vreplgr2vr_d(a) +pub fn lsx_vreplgr2vr_d(a: i64) -> v2i64 { + unsafe { __lsx_vreplgr2vr_d(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vpcnt_b(a: v16i8) -> v16i8 { - __lsx_vpcnt_b(a) +pub fn lsx_vpcnt_b(a: v16i8) -> v16i8 { + unsafe { __lsx_vpcnt_b(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vpcnt_h(a: v8i16) -> v8i16 { - __lsx_vpcnt_h(a) +pub fn lsx_vpcnt_h(a: v8i16) -> v8i16 { + unsafe { __lsx_vpcnt_h(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vpcnt_w(a: v4i32) -> v4i32 { - __lsx_vpcnt_w(a) +pub fn lsx_vpcnt_w(a: v4i32) -> v4i32 { + unsafe { __lsx_vpcnt_w(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vpcnt_d(a: v2i64) -> v2i64 { - __lsx_vpcnt_d(a) +pub fn lsx_vpcnt_d(a: v2i64) -> v2i64 { + unsafe { __lsx_vpcnt_d(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vclo_b(a: v16i8) -> v16i8 { - __lsx_vclo_b(a) +pub fn lsx_vclo_b(a: v16i8) -> v16i8 { + unsafe { __lsx_vclo_b(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vclo_h(a: v8i16) -> v8i16 { - __lsx_vclo_h(a) +pub fn lsx_vclo_h(a: v8i16) -> v8i16 { + unsafe { __lsx_vclo_h(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vclo_w(a: v4i32) -> v4i32 { - __lsx_vclo_w(a) +pub fn lsx_vclo_w(a: v4i32) -> v4i32 { + unsafe { __lsx_vclo_w(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vclo_d(a: v2i64) -> v2i64 { - __lsx_vclo_d(a) +pub fn lsx_vclo_d(a: v2i64) -> v2i64 { + unsafe { __lsx_vclo_d(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vclz_b(a: v16i8) -> v16i8 { - __lsx_vclz_b(a) +pub fn lsx_vclz_b(a: v16i8) -> v16i8 { + unsafe { __lsx_vclz_b(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vclz_h(a: v8i16) -> v8i16 { - __lsx_vclz_h(a) +pub fn lsx_vclz_h(a: v8i16) -> v8i16 { + unsafe { __lsx_vclz_h(a) } } #[inline] #[target_feature(enable = 
"lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vclz_w(a: v4i32) -> v4i32 { - __lsx_vclz_w(a) +pub fn lsx_vclz_w(a: v4i32) -> v4i32 { + unsafe { __lsx_vclz_w(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vclz_d(a: v2i64) -> v2i64 { - __lsx_vclz_d(a) +pub fn lsx_vclz_d(a: v2i64) -> v2i64 { + unsafe { __lsx_vclz_d(a) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vpickve2gr_b(a: v16i8) -> i32 { +pub fn lsx_vpickve2gr_b(a: v16i8) -> i32 { static_assert_uimm_bits!(IMM4, 4); - __lsx_vpickve2gr_b(a, IMM4) + unsafe { __lsx_vpickve2gr_b(a, IMM4) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vpickve2gr_h(a: v8i16) -> i32 { +pub fn lsx_vpickve2gr_h(a: v8i16) -> i32 { static_assert_uimm_bits!(IMM3, 3); - __lsx_vpickve2gr_h(a, IMM3) + unsafe { __lsx_vpickve2gr_h(a, IMM3) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vpickve2gr_w(a: v4i32) -> i32 { +pub fn lsx_vpickve2gr_w(a: v4i32) -> i32 { static_assert_uimm_bits!(IMM2, 2); - __lsx_vpickve2gr_w(a, IMM2) + unsafe { __lsx_vpickve2gr_w(a, IMM2) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vpickve2gr_d(a: v2i64) -> i64 { +pub fn lsx_vpickve2gr_d(a: v2i64) -> i64 { static_assert_uimm_bits!(IMM1, 1); - __lsx_vpickve2gr_d(a, IMM1) + unsafe { __lsx_vpickve2gr_d(a, IMM1) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vpickve2gr_bu(a: v16i8) -> u32 { +pub fn lsx_vpickve2gr_bu(a: v16i8) -> u32 { static_assert_uimm_bits!(IMM4, 4); - __lsx_vpickve2gr_bu(a, IMM4) + unsafe { __lsx_vpickve2gr_bu(a, IMM4) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vpickve2gr_hu(a: v8i16) -> u32 { +pub fn lsx_vpickve2gr_hu(a: v8i16) -> u32 { static_assert_uimm_bits!(IMM3, 3); - __lsx_vpickve2gr_hu(a, IMM3) + unsafe { __lsx_vpickve2gr_hu(a, IMM3) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vpickve2gr_wu(a: v4i32) -> u32 { +pub fn lsx_vpickve2gr_wu(a: v4i32) -> u32 { static_assert_uimm_bits!(IMM2, 2); - __lsx_vpickve2gr_wu(a, IMM2) + unsafe { __lsx_vpickve2gr_wu(a, IMM2) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vpickve2gr_du(a: v2i64) -> u64 { +pub fn lsx_vpickve2gr_du(a: v2i64) -> u64 { static_assert_uimm_bits!(IMM1, 1); - __lsx_vpickve2gr_du(a, IMM1) + unsafe { __lsx_vpickve2gr_du(a, IMM1) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vinsgr2vr_b(a: v16i8, b: i32) -> v16i8 { +pub fn lsx_vinsgr2vr_b(a: v16i8, b: i32) -> v16i8 { static_assert_uimm_bits!(IMM4, 4); - __lsx_vinsgr2vr_b(a, b, IMM4) + unsafe { __lsx_vinsgr2vr_b(a, b, 
IMM4) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vinsgr2vr_h(a: v8i16, b: i32) -> v8i16 { +pub fn lsx_vinsgr2vr_h(a: v8i16, b: i32) -> v8i16 { static_assert_uimm_bits!(IMM3, 3); - __lsx_vinsgr2vr_h(a, b, IMM3) + unsafe { __lsx_vinsgr2vr_h(a, b, IMM3) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vinsgr2vr_w(a: v4i32, b: i32) -> v4i32 { +pub fn lsx_vinsgr2vr_w(a: v4i32, b: i32) -> v4i32 { static_assert_uimm_bits!(IMM2, 2); - __lsx_vinsgr2vr_w(a, b, IMM2) + unsafe { __lsx_vinsgr2vr_w(a, b, IMM2) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vinsgr2vr_d(a: v2i64, b: i64) -> v2i64 { +pub fn lsx_vinsgr2vr_d(a: v2i64, b: i64) -> v2i64 { static_assert_uimm_bits!(IMM1, 1); - __lsx_vinsgr2vr_d(a, b, IMM1) + unsafe { __lsx_vinsgr2vr_d(a, b, IMM1) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfadd_s(a: v4f32, b: v4f32) -> v4f32 { - __lsx_vfadd_s(a, b) +pub fn lsx_vfadd_s(a: v4f32, b: v4f32) -> v4f32 { + unsafe { __lsx_vfadd_s(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfadd_d(a: v2f64, b: v2f64) -> v2f64 { - __lsx_vfadd_d(a, b) +pub fn lsx_vfadd_d(a: v2f64, b: v2f64) -> v2f64 { + unsafe { __lsx_vfadd_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfsub_s(a: v4f32, b: v4f32) -> v4f32 { - __lsx_vfsub_s(a, b) +pub fn lsx_vfsub_s(a: v4f32, b: v4f32) -> v4f32 { + unsafe { __lsx_vfsub_s(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfsub_d(a: v2f64, b: v2f64) -> v2f64 { - __lsx_vfsub_d(a, b) +pub fn lsx_vfsub_d(a: v2f64, b: v2f64) -> v2f64 { + unsafe { __lsx_vfsub_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfmul_s(a: v4f32, b: v4f32) -> v4f32 { - __lsx_vfmul_s(a, b) +pub fn lsx_vfmul_s(a: v4f32, b: v4f32) -> v4f32 { + unsafe { __lsx_vfmul_s(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfmul_d(a: v2f64, b: v2f64) -> v2f64 { - __lsx_vfmul_d(a, b) +pub fn lsx_vfmul_d(a: v2f64, b: v2f64) -> v2f64 { + unsafe { __lsx_vfmul_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfdiv_s(a: v4f32, b: v4f32) -> v4f32 { - __lsx_vfdiv_s(a, b) +pub fn lsx_vfdiv_s(a: v4f32, b: v4f32) -> v4f32 { + unsafe { __lsx_vfdiv_s(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfdiv_d(a: v2f64, b: v2f64) -> v2f64 { - __lsx_vfdiv_d(a, b) +pub fn lsx_vfdiv_d(a: v2f64, b: v2f64) -> v2f64 { + unsafe { __lsx_vfdiv_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcvt_h_s(a: v4f32, b: v4f32) -> v8i16 { - __lsx_vfcvt_h_s(a, b) +pub fn lsx_vfcvt_h_s(a: v4f32, b: v4f32) -> v8i16 { + unsafe { __lsx_vfcvt_h_s(a, b) } } 
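
Every hunk in this stretch of the patch applies the same transformation: the public wrapper drops `unsafe` from its signature, the call to the raw `__lsx_*` binding moves into an internal `unsafe { ... }` block, and the `#[target_feature(enable = "lsx")]` attribute stays in place to express the hardware requirement. A minimal, self-contained sketch of that shape follows; `raw_add_one` and `add_one` are hypothetical stand-ins and are not part of the patch:

```rust
// Hypothetical stand-in for a generated `__lsx_*` binding. A real binding is a
// compiler intrinsic declared in an extern block; this body only keeps the
// sketch runnable on any target.
unsafe fn raw_add_one(x: i32) -> i32 {
    x.wrapping_add(1)
}

// Before this patch the wrapper itself was `pub unsafe fn`; afterwards it is a
// safe function that confines `unsafe` to the call site.
pub fn add_one(x: i32) -> i32 {
    // SAFETY: `raw_add_one` has no preconditions in this sketch. For the LSX
    // wrappers in the diff, the remaining requirement is that the LSX feature
    // is enabled, which the unchanged `#[target_feature(enable = "lsx")]`
    // attribute on each wrapper expresses.
    unsafe { raw_add_one(x) }
}

fn main() {
    assert_eq!(add_one(41), 42);
}
```

The visible effect throughout the diff is the same: `unsafe` moves from the public signature into the body, so the justification for the unsafe call sits next to the call itself.
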
#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcvt_s_d(a: v2f64, b: v2f64) -> v4f32 { - __lsx_vfcvt_s_d(a, b) +pub fn lsx_vfcvt_s_d(a: v2f64, b: v2f64) -> v4f32 { + unsafe { __lsx_vfcvt_s_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfmin_s(a: v4f32, b: v4f32) -> v4f32 { - __lsx_vfmin_s(a, b) +pub fn lsx_vfmin_s(a: v4f32, b: v4f32) -> v4f32 { + unsafe { __lsx_vfmin_s(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfmin_d(a: v2f64, b: v2f64) -> v2f64 { - __lsx_vfmin_d(a, b) +pub fn lsx_vfmin_d(a: v2f64, b: v2f64) -> v2f64 { + unsafe { __lsx_vfmin_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfmina_s(a: v4f32, b: v4f32) -> v4f32 { - __lsx_vfmina_s(a, b) +pub fn lsx_vfmina_s(a: v4f32, b: v4f32) -> v4f32 { + unsafe { __lsx_vfmina_s(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfmina_d(a: v2f64, b: v2f64) -> v2f64 { - __lsx_vfmina_d(a, b) +pub fn lsx_vfmina_d(a: v2f64, b: v2f64) -> v2f64 { + unsafe { __lsx_vfmina_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfmax_s(a: v4f32, b: v4f32) -> v4f32 { - __lsx_vfmax_s(a, b) +pub fn lsx_vfmax_s(a: v4f32, b: v4f32) -> v4f32 { + unsafe { __lsx_vfmax_s(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfmax_d(a: v2f64, b: v2f64) -> v2f64 { - __lsx_vfmax_d(a, b) +pub fn lsx_vfmax_d(a: v2f64, b: v2f64) -> v2f64 { + unsafe { __lsx_vfmax_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfmaxa_s(a: v4f32, b: v4f32) -> v4f32 { - __lsx_vfmaxa_s(a, b) +pub fn lsx_vfmaxa_s(a: v4f32, b: v4f32) -> v4f32 { + unsafe { __lsx_vfmaxa_s(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfmaxa_d(a: v2f64, b: v2f64) -> v2f64 { - __lsx_vfmaxa_d(a, b) +pub fn lsx_vfmaxa_d(a: v2f64, b: v2f64) -> v2f64 { + unsafe { __lsx_vfmaxa_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfclass_s(a: v4f32) -> v4i32 { - __lsx_vfclass_s(a) +pub fn lsx_vfclass_s(a: v4f32) -> v4i32 { + unsafe { __lsx_vfclass_s(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfclass_d(a: v2f64) -> v2i64 { - __lsx_vfclass_d(a) +pub fn lsx_vfclass_d(a: v2f64) -> v2i64 { + unsafe { __lsx_vfclass_d(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfsqrt_s(a: v4f32) -> v4f32 { - __lsx_vfsqrt_s(a) +pub fn lsx_vfsqrt_s(a: v4f32) -> v4f32 { + unsafe { __lsx_vfsqrt_s(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfsqrt_d(a: v2f64) -> v2f64 { - __lsx_vfsqrt_d(a) +pub fn lsx_vfsqrt_d(a: v2f64) -> v2f64 { + unsafe { __lsx_vfsqrt_d(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", 
issue = "117427")] -pub unsafe fn lsx_vfrecip_s(a: v4f32) -> v4f32 { - __lsx_vfrecip_s(a) +pub fn lsx_vfrecip_s(a: v4f32) -> v4f32 { + unsafe { __lsx_vfrecip_s(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfrecip_d(a: v2f64) -> v2f64 { - __lsx_vfrecip_d(a) +pub fn lsx_vfrecip_d(a: v2f64) -> v2f64 { + unsafe { __lsx_vfrecip_d(a) } } #[inline] #[target_feature(enable = "lsx,frecipe")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfrecipe_s(a: v4f32) -> v4f32 { - __lsx_vfrecipe_s(a) +pub fn lsx_vfrecipe_s(a: v4f32) -> v4f32 { + unsafe { __lsx_vfrecipe_s(a) } } #[inline] #[target_feature(enable = "lsx,frecipe")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfrecipe_d(a: v2f64) -> v2f64 { - __lsx_vfrecipe_d(a) +pub fn lsx_vfrecipe_d(a: v2f64) -> v2f64 { + unsafe { __lsx_vfrecipe_d(a) } } #[inline] #[target_feature(enable = "lsx,frecipe")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfrsqrte_s(a: v4f32) -> v4f32 { - __lsx_vfrsqrte_s(a) +pub fn lsx_vfrsqrte_s(a: v4f32) -> v4f32 { + unsafe { __lsx_vfrsqrte_s(a) } } #[inline] #[target_feature(enable = "lsx,frecipe")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfrsqrte_d(a: v2f64) -> v2f64 { - __lsx_vfrsqrte_d(a) +pub fn lsx_vfrsqrte_d(a: v2f64) -> v2f64 { + unsafe { __lsx_vfrsqrte_d(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfrint_s(a: v4f32) -> v4f32 { - __lsx_vfrint_s(a) +pub fn lsx_vfrint_s(a: v4f32) -> v4f32 { + unsafe { __lsx_vfrint_s(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfrint_d(a: v2f64) -> v2f64 { - __lsx_vfrint_d(a) +pub fn lsx_vfrint_d(a: v2f64) -> v2f64 { + unsafe { __lsx_vfrint_d(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfrsqrt_s(a: v4f32) -> v4f32 { - __lsx_vfrsqrt_s(a) +pub fn lsx_vfrsqrt_s(a: v4f32) -> v4f32 { + unsafe { __lsx_vfrsqrt_s(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfrsqrt_d(a: v2f64) -> v2f64 { - __lsx_vfrsqrt_d(a) +pub fn lsx_vfrsqrt_d(a: v2f64) -> v2f64 { + unsafe { __lsx_vfrsqrt_d(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vflogb_s(a: v4f32) -> v4f32 { - __lsx_vflogb_s(a) +pub fn lsx_vflogb_s(a: v4f32) -> v4f32 { + unsafe { __lsx_vflogb_s(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vflogb_d(a: v2f64) -> v2f64 { - __lsx_vflogb_d(a) +pub fn lsx_vflogb_d(a: v2f64) -> v2f64 { + unsafe { __lsx_vflogb_d(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcvth_s_h(a: v8i16) -> v4f32 { - __lsx_vfcvth_s_h(a) +pub fn lsx_vfcvth_s_h(a: v8i16) -> v4f32 { + unsafe { __lsx_vfcvth_s_h(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcvth_d_s(a: v4f32) -> v2f64 { - __lsx_vfcvth_d_s(a) +pub fn lsx_vfcvth_d_s(a: v4f32) -> v2f64 { + unsafe { __lsx_vfcvth_d_s(a) } } #[inline] #[target_feature(enable = "lsx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcvtl_s_h(a: v8i16) -> v4f32 { - __lsx_vfcvtl_s_h(a) +pub fn lsx_vfcvtl_s_h(a: v8i16) -> v4f32 { + unsafe { __lsx_vfcvtl_s_h(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcvtl_d_s(a: v4f32) -> v2f64 { - __lsx_vfcvtl_d_s(a) +pub fn lsx_vfcvtl_d_s(a: v4f32) -> v2f64 { + unsafe { __lsx_vfcvtl_d_s(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vftint_w_s(a: v4f32) -> v4i32 { - __lsx_vftint_w_s(a) +pub fn lsx_vftint_w_s(a: v4f32) -> v4i32 { + unsafe { __lsx_vftint_w_s(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vftint_l_d(a: v2f64) -> v2i64 { - __lsx_vftint_l_d(a) +pub fn lsx_vftint_l_d(a: v2f64) -> v2i64 { + unsafe { __lsx_vftint_l_d(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vftint_wu_s(a: v4f32) -> v4u32 { - __lsx_vftint_wu_s(a) +pub fn lsx_vftint_wu_s(a: v4f32) -> v4u32 { + unsafe { __lsx_vftint_wu_s(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vftint_lu_d(a: v2f64) -> v2u64 { - __lsx_vftint_lu_d(a) +pub fn lsx_vftint_lu_d(a: v2f64) -> v2u64 { + unsafe { __lsx_vftint_lu_d(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vftintrz_w_s(a: v4f32) -> v4i32 { - __lsx_vftintrz_w_s(a) +pub fn lsx_vftintrz_w_s(a: v4f32) -> v4i32 { + unsafe { __lsx_vftintrz_w_s(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vftintrz_l_d(a: v2f64) -> v2i64 { - __lsx_vftintrz_l_d(a) +pub fn lsx_vftintrz_l_d(a: v2f64) -> v2i64 { + unsafe { __lsx_vftintrz_l_d(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vftintrz_wu_s(a: v4f32) -> v4u32 { - __lsx_vftintrz_wu_s(a) +pub fn lsx_vftintrz_wu_s(a: v4f32) -> v4u32 { + unsafe { __lsx_vftintrz_wu_s(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vftintrz_lu_d(a: v2f64) -> v2u64 { - __lsx_vftintrz_lu_d(a) +pub fn lsx_vftintrz_lu_d(a: v2f64) -> v2u64 { + unsafe { __lsx_vftintrz_lu_d(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vffint_s_w(a: v4i32) -> v4f32 { - __lsx_vffint_s_w(a) +pub fn lsx_vffint_s_w(a: v4i32) -> v4f32 { + unsafe { __lsx_vffint_s_w(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vffint_d_l(a: v2i64) -> v2f64 { - __lsx_vffint_d_l(a) +pub fn lsx_vffint_d_l(a: v2i64) -> v2f64 { + unsafe { __lsx_vffint_d_l(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vffint_s_wu(a: v4u32) -> v4f32 { - __lsx_vffint_s_wu(a) +pub fn lsx_vffint_s_wu(a: v4u32) -> v4f32 { + unsafe { __lsx_vffint_s_wu(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vffint_d_lu(a: v2u64) -> v2f64 { - __lsx_vffint_d_lu(a) +pub fn 
lsx_vffint_d_lu(a: v2u64) -> v2f64 { + unsafe { __lsx_vffint_d_lu(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vandn_v(a: v16u8, b: v16u8) -> v16u8 { - __lsx_vandn_v(a, b) +pub fn lsx_vandn_v(a: v16u8, b: v16u8) -> v16u8 { + unsafe { __lsx_vandn_v(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vneg_b(a: v16i8) -> v16i8 { - __lsx_vneg_b(a) +pub fn lsx_vneg_b(a: v16i8) -> v16i8 { + unsafe { __lsx_vneg_b(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vneg_h(a: v8i16) -> v8i16 { - __lsx_vneg_h(a) +pub fn lsx_vneg_h(a: v8i16) -> v8i16 { + unsafe { __lsx_vneg_h(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vneg_w(a: v4i32) -> v4i32 { - __lsx_vneg_w(a) +pub fn lsx_vneg_w(a: v4i32) -> v4i32 { + unsafe { __lsx_vneg_w(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vneg_d(a: v2i64) -> v2i64 { - __lsx_vneg_d(a) +pub fn lsx_vneg_d(a: v2i64) -> v2i64 { + unsafe { __lsx_vneg_d(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmuh_b(a: v16i8, b: v16i8) -> v16i8 { - __lsx_vmuh_b(a, b) +pub fn lsx_vmuh_b(a: v16i8, b: v16i8) -> v16i8 { + unsafe { __lsx_vmuh_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmuh_h(a: v8i16, b: v8i16) -> v8i16 { - __lsx_vmuh_h(a, b) +pub fn lsx_vmuh_h(a: v8i16, b: v8i16) -> v8i16 { + unsafe { __lsx_vmuh_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmuh_w(a: v4i32, b: v4i32) -> v4i32 { - __lsx_vmuh_w(a, b) +pub fn lsx_vmuh_w(a: v4i32, b: v4i32) -> v4i32 { + unsafe { __lsx_vmuh_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmuh_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vmuh_d(a, b) +pub fn lsx_vmuh_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vmuh_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmuh_bu(a: v16u8, b: v16u8) -> v16u8 { - __lsx_vmuh_bu(a, b) +pub fn lsx_vmuh_bu(a: v16u8, b: v16u8) -> v16u8 { + unsafe { __lsx_vmuh_bu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmuh_hu(a: v8u16, b: v8u16) -> v8u16 { - __lsx_vmuh_hu(a, b) +pub fn lsx_vmuh_hu(a: v8u16, b: v8u16) -> v8u16 { + unsafe { __lsx_vmuh_hu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmuh_wu(a: v4u32, b: v4u32) -> v4u32 { - __lsx_vmuh_wu(a, b) +pub fn lsx_vmuh_wu(a: v4u32, b: v4u32) -> v4u32 { + unsafe { __lsx_vmuh_wu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmuh_du(a: v2u64, b: v2u64) -> v2u64 { - __lsx_vmuh_du(a, b) +pub fn lsx_vmuh_du(a: v2u64, b: v2u64) -> v2u64 { + unsafe { __lsx_vmuh_du(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsllwil_h_b(a: v16i8) -> v8i16 { +pub fn lsx_vsllwil_h_b(a: v16i8) -> v8i16 { static_assert_uimm_bits!(IMM3, 3); - __lsx_vsllwil_h_b(a, IMM3) + unsafe { __lsx_vsllwil_h_b(a, IMM3) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsllwil_w_h(a: v8i16) -> v4i32 { +pub fn lsx_vsllwil_w_h(a: v8i16) -> v4i32 { static_assert_uimm_bits!(IMM4, 4); - __lsx_vsllwil_w_h(a, IMM4) + unsafe { __lsx_vsllwil_w_h(a, IMM4) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsllwil_d_w(a: v4i32) -> v2i64 { +pub fn lsx_vsllwil_d_w(a: v4i32) -> v2i64 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vsllwil_d_w(a, IMM5) + unsafe { __lsx_vsllwil_d_w(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsllwil_hu_bu(a: v16u8) -> v8u16 { +pub fn lsx_vsllwil_hu_bu(a: v16u8) -> v8u16 { static_assert_uimm_bits!(IMM3, 3); - __lsx_vsllwil_hu_bu(a, IMM3) + unsafe { __lsx_vsllwil_hu_bu(a, IMM3) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsllwil_wu_hu(a: v8u16) -> v4u32 { +pub fn lsx_vsllwil_wu_hu(a: v8u16) -> v4u32 { static_assert_uimm_bits!(IMM4, 4); - __lsx_vsllwil_wu_hu(a, IMM4) + unsafe { __lsx_vsllwil_wu_hu(a, IMM4) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsllwil_du_wu(a: v4u32) -> v2u64 { +pub fn lsx_vsllwil_du_wu(a: v4u32) -> v2u64 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vsllwil_du_wu(a, IMM5) + unsafe { __lsx_vsllwil_du_wu(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsran_b_h(a: v8i16, b: v8i16) -> v16i8 { - __lsx_vsran_b_h(a, b) +pub fn lsx_vsran_b_h(a: v8i16, b: v8i16) -> v16i8 { + unsafe { __lsx_vsran_b_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsran_h_w(a: v4i32, b: v4i32) -> v8i16 { - __lsx_vsran_h_w(a, b) +pub fn lsx_vsran_h_w(a: v4i32, b: v4i32) -> v8i16 { + unsafe { __lsx_vsran_h_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsran_w_d(a: v2i64, b: v2i64) -> v4i32 { - __lsx_vsran_w_d(a, b) +pub fn lsx_vsran_w_d(a: v2i64, b: v2i64) -> v4i32 { + unsafe { __lsx_vsran_w_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssran_b_h(a: v8i16, b: v8i16) -> v16i8 { - __lsx_vssran_b_h(a, b) +pub fn lsx_vssran_b_h(a: v8i16, b: v8i16) -> v16i8 { + unsafe { __lsx_vssran_b_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssran_h_w(a: v4i32, b: v4i32) -> v8i16 { - __lsx_vssran_h_w(a, b) +pub fn lsx_vssran_h_w(a: v4i32, b: v4i32) -> v8i16 { + unsafe { __lsx_vssran_h_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssran_w_d(a: v2i64, b: 
v2i64) -> v4i32 { - __lsx_vssran_w_d(a, b) +pub fn lsx_vssran_w_d(a: v2i64, b: v2i64) -> v4i32 { + unsafe { __lsx_vssran_w_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssran_bu_h(a: v8u16, b: v8u16) -> v16u8 { - __lsx_vssran_bu_h(a, b) +pub fn lsx_vssran_bu_h(a: v8u16, b: v8u16) -> v16u8 { + unsafe { __lsx_vssran_bu_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssran_hu_w(a: v4u32, b: v4u32) -> v8u16 { - __lsx_vssran_hu_w(a, b) +pub fn lsx_vssran_hu_w(a: v4u32, b: v4u32) -> v8u16 { + unsafe { __lsx_vssran_hu_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssran_wu_d(a: v2u64, b: v2u64) -> v4u32 { - __lsx_vssran_wu_d(a, b) +pub fn lsx_vssran_wu_d(a: v2u64, b: v2u64) -> v4u32 { + unsafe { __lsx_vssran_wu_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrarn_b_h(a: v8i16, b: v8i16) -> v16i8 { - __lsx_vsrarn_b_h(a, b) +pub fn lsx_vsrarn_b_h(a: v8i16, b: v8i16) -> v16i8 { + unsafe { __lsx_vsrarn_b_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrarn_h_w(a: v4i32, b: v4i32) -> v8i16 { - __lsx_vsrarn_h_w(a, b) +pub fn lsx_vsrarn_h_w(a: v4i32, b: v4i32) -> v8i16 { + unsafe { __lsx_vsrarn_h_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrarn_w_d(a: v2i64, b: v2i64) -> v4i32 { - __lsx_vsrarn_w_d(a, b) +pub fn lsx_vsrarn_w_d(a: v2i64, b: v2i64) -> v4i32 { + unsafe { __lsx_vsrarn_w_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrarn_b_h(a: v8i16, b: v8i16) -> v16i8 { - __lsx_vssrarn_b_h(a, b) +pub fn lsx_vssrarn_b_h(a: v8i16, b: v8i16) -> v16i8 { + unsafe { __lsx_vssrarn_b_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrarn_h_w(a: v4i32, b: v4i32) -> v8i16 { - __lsx_vssrarn_h_w(a, b) +pub fn lsx_vssrarn_h_w(a: v4i32, b: v4i32) -> v8i16 { + unsafe { __lsx_vssrarn_h_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrarn_w_d(a: v2i64, b: v2i64) -> v4i32 { - __lsx_vssrarn_w_d(a, b) +pub fn lsx_vssrarn_w_d(a: v2i64, b: v2i64) -> v4i32 { + unsafe { __lsx_vssrarn_w_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrarn_bu_h(a: v8u16, b: v8u16) -> v16u8 { - __lsx_vssrarn_bu_h(a, b) +pub fn lsx_vssrarn_bu_h(a: v8u16, b: v8u16) -> v16u8 { + unsafe { __lsx_vssrarn_bu_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrarn_hu_w(a: v4u32, b: v4u32) -> v8u16 { - __lsx_vssrarn_hu_w(a, b) +pub fn lsx_vssrarn_hu_w(a: v4u32, b: v4u32) -> v8u16 { + unsafe { __lsx_vssrarn_hu_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrarn_wu_d(a: v2u64, b: v2u64) -> v4u32 { - __lsx_vssrarn_wu_d(a, b) +pub fn lsx_vssrarn_wu_d(a: v2u64, b: v2u64) -> v4u32 { + unsafe { 
__lsx_vssrarn_wu_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrln_b_h(a: v8i16, b: v8i16) -> v16i8 { - __lsx_vsrln_b_h(a, b) +pub fn lsx_vsrln_b_h(a: v8i16, b: v8i16) -> v16i8 { + unsafe { __lsx_vsrln_b_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrln_h_w(a: v4i32, b: v4i32) -> v8i16 { - __lsx_vsrln_h_w(a, b) +pub fn lsx_vsrln_h_w(a: v4i32, b: v4i32) -> v8i16 { + unsafe { __lsx_vsrln_h_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrln_w_d(a: v2i64, b: v2i64) -> v4i32 { - __lsx_vsrln_w_d(a, b) +pub fn lsx_vsrln_w_d(a: v2i64, b: v2i64) -> v4i32 { + unsafe { __lsx_vsrln_w_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrln_bu_h(a: v8u16, b: v8u16) -> v16u8 { - __lsx_vssrln_bu_h(a, b) +pub fn lsx_vssrln_bu_h(a: v8u16, b: v8u16) -> v16u8 { + unsafe { __lsx_vssrln_bu_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrln_hu_w(a: v4u32, b: v4u32) -> v8u16 { - __lsx_vssrln_hu_w(a, b) +pub fn lsx_vssrln_hu_w(a: v4u32, b: v4u32) -> v8u16 { + unsafe { __lsx_vssrln_hu_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrln_wu_d(a: v2u64, b: v2u64) -> v4u32 { - __lsx_vssrln_wu_d(a, b) +pub fn lsx_vssrln_wu_d(a: v2u64, b: v2u64) -> v4u32 { + unsafe { __lsx_vssrln_wu_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrlrn_b_h(a: v8i16, b: v8i16) -> v16i8 { - __lsx_vsrlrn_b_h(a, b) +pub fn lsx_vsrlrn_b_h(a: v8i16, b: v8i16) -> v16i8 { + unsafe { __lsx_vsrlrn_b_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrlrn_h_w(a: v4i32, b: v4i32) -> v8i16 { - __lsx_vsrlrn_h_w(a, b) +pub fn lsx_vsrlrn_h_w(a: v4i32, b: v4i32) -> v8i16 { + unsafe { __lsx_vsrlrn_h_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrlrn_w_d(a: v2i64, b: v2i64) -> v4i32 { - __lsx_vsrlrn_w_d(a, b) +pub fn lsx_vsrlrn_w_d(a: v2i64, b: v2i64) -> v4i32 { + unsafe { __lsx_vsrlrn_w_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrlrn_bu_h(a: v8u16, b: v8u16) -> v16u8 { - __lsx_vssrlrn_bu_h(a, b) +pub fn lsx_vssrlrn_bu_h(a: v8u16, b: v8u16) -> v16u8 { + unsafe { __lsx_vssrlrn_bu_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrlrn_hu_w(a: v4u32, b: v4u32) -> v8u16 { - __lsx_vssrlrn_hu_w(a, b) +pub fn lsx_vssrlrn_hu_w(a: v4u32, b: v4u32) -> v8u16 { + unsafe { __lsx_vssrlrn_hu_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrlrn_wu_d(a: v2u64, b: v2u64) -> v4u32 { - __lsx_vssrlrn_wu_d(a, b) +pub fn lsx_vssrlrn_wu_d(a: v2u64, b: v2u64) -> v4u32 { + unsafe { __lsx_vssrlrn_wu_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfrstpi_b(a: v16i8, b: v16i8) -> v16i8 { +pub fn lsx_vfrstpi_b(a: v16i8, b: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vfrstpi_b(a, b, IMM5) + unsafe { __lsx_vfrstpi_b(a, b, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfrstpi_h(a: v8i16, b: v8i16) -> v8i16 { +pub fn lsx_vfrstpi_h(a: v8i16, b: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vfrstpi_h(a, b, IMM5) + unsafe { __lsx_vfrstpi_h(a, b, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfrstp_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8 { - __lsx_vfrstp_b(a, b, c) +pub fn lsx_vfrstp_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8 { + unsafe { __lsx_vfrstp_b(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfrstp_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { - __lsx_vfrstp_h(a, b, c) +pub fn lsx_vfrstp_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { + unsafe { __lsx_vfrstp_h(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vshuf4i_d(a: v2i64, b: v2i64) -> v2i64 { +pub fn lsx_vshuf4i_d(a: v2i64, b: v2i64) -> v2i64 { static_assert_uimm_bits!(IMM8, 8); - __lsx_vshuf4i_d(a, b, IMM8) + unsafe { __lsx_vshuf4i_d(a, b, IMM8) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vbsrl_v(a: v16i8) -> v16i8 { +pub fn lsx_vbsrl_v(a: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vbsrl_v(a, IMM5) + unsafe { __lsx_vbsrl_v(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vbsll_v(a: v16i8) -> v16i8 { +pub fn lsx_vbsll_v(a: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vbsll_v(a, IMM5) + unsafe { __lsx_vbsll_v(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vextrins_b(a: v16i8, b: v16i8) -> v16i8 { +pub fn lsx_vextrins_b(a: v16i8, b: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM8, 8); - __lsx_vextrins_b(a, b, IMM8) + unsafe { __lsx_vextrins_b(a, b, IMM8) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vextrins_h(a: v8i16, b: v8i16) -> v8i16 { +pub fn lsx_vextrins_h(a: v8i16, b: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM8, 8); - __lsx_vextrins_h(a, b, IMM8) + unsafe { __lsx_vextrins_h(a, b, IMM8) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vextrins_w(a: v4i32, b: v4i32) -> v4i32 { +pub fn lsx_vextrins_w(a: v4i32, b: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM8, 8); - __lsx_vextrins_w(a, b, IMM8) + unsafe { __lsx_vextrins_w(a, b, IMM8) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vextrins_d(a: v2i64, b: v2i64) -> v2i64 { +pub fn lsx_vextrins_d(a: 
v2i64, b: v2i64) -> v2i64 { static_assert_uimm_bits!(IMM8, 8); - __lsx_vextrins_d(a, b, IMM8) + unsafe { __lsx_vextrins_d(a, b, IMM8) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmskltz_b(a: v16i8) -> v16i8 { - __lsx_vmskltz_b(a) +pub fn lsx_vmskltz_b(a: v16i8) -> v16i8 { + unsafe { __lsx_vmskltz_b(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmskltz_h(a: v8i16) -> v8i16 { - __lsx_vmskltz_h(a) +pub fn lsx_vmskltz_h(a: v8i16) -> v8i16 { + unsafe { __lsx_vmskltz_h(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmskltz_w(a: v4i32) -> v4i32 { - __lsx_vmskltz_w(a) +pub fn lsx_vmskltz_w(a: v4i32) -> v4i32 { + unsafe { __lsx_vmskltz_w(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmskltz_d(a: v2i64) -> v2i64 { - __lsx_vmskltz_d(a) +pub fn lsx_vmskltz_d(a: v2i64) -> v2i64 { + unsafe { __lsx_vmskltz_d(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsigncov_b(a: v16i8, b: v16i8) -> v16i8 { - __lsx_vsigncov_b(a, b) +pub fn lsx_vsigncov_b(a: v16i8, b: v16i8) -> v16i8 { + unsafe { __lsx_vsigncov_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsigncov_h(a: v8i16, b: v8i16) -> v8i16 { - __lsx_vsigncov_h(a, b) +pub fn lsx_vsigncov_h(a: v8i16, b: v8i16) -> v8i16 { + unsafe { __lsx_vsigncov_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsigncov_w(a: v4i32, b: v4i32) -> v4i32 { - __lsx_vsigncov_w(a, b) +pub fn lsx_vsigncov_w(a: v4i32, b: v4i32) -> v4i32 { + unsafe { __lsx_vsigncov_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsigncov_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vsigncov_d(a, b) +pub fn lsx_vsigncov_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vsigncov_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfmadd_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32 { - __lsx_vfmadd_s(a, b, c) +pub fn lsx_vfmadd_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32 { + unsafe { __lsx_vfmadd_s(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfmadd_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64 { - __lsx_vfmadd_d(a, b, c) +pub fn lsx_vfmadd_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64 { + unsafe { __lsx_vfmadd_d(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfmsub_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32 { - __lsx_vfmsub_s(a, b, c) +pub fn lsx_vfmsub_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32 { + unsafe { __lsx_vfmsub_s(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfmsub_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64 { - __lsx_vfmsub_d(a, b, c) +pub fn lsx_vfmsub_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64 { + unsafe { __lsx_vfmsub_d(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfnmadd_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32 { - __lsx_vfnmadd_s(a, b, c) +pub fn lsx_vfnmadd_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32 { + unsafe { __lsx_vfnmadd_s(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfnmadd_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64 { - __lsx_vfnmadd_d(a, b, c) +pub fn lsx_vfnmadd_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64 { + unsafe { __lsx_vfnmadd_d(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfnmsub_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32 { - __lsx_vfnmsub_s(a, b, c) +pub fn lsx_vfnmsub_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32 { + unsafe { __lsx_vfnmsub_s(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfnmsub_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64 { - __lsx_vfnmsub_d(a, b, c) +pub fn lsx_vfnmsub_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64 { + unsafe { __lsx_vfnmsub_d(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vftintrne_w_s(a: v4f32) -> v4i32 { - __lsx_vftintrne_w_s(a) +pub fn lsx_vftintrne_w_s(a: v4f32) -> v4i32 { + unsafe { __lsx_vftintrne_w_s(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vftintrne_l_d(a: v2f64) -> v2i64 { - __lsx_vftintrne_l_d(a) +pub fn lsx_vftintrne_l_d(a: v2f64) -> v2i64 { + unsafe { __lsx_vftintrne_l_d(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vftintrp_w_s(a: v4f32) -> v4i32 { - __lsx_vftintrp_w_s(a) +pub fn lsx_vftintrp_w_s(a: v4f32) -> v4i32 { + unsafe { __lsx_vftintrp_w_s(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vftintrp_l_d(a: v2f64) -> v2i64 { - __lsx_vftintrp_l_d(a) +pub fn lsx_vftintrp_l_d(a: v2f64) -> v2i64 { + unsafe { __lsx_vftintrp_l_d(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vftintrm_w_s(a: v4f32) -> v4i32 { - __lsx_vftintrm_w_s(a) +pub fn lsx_vftintrm_w_s(a: v4f32) -> v4i32 { + unsafe { __lsx_vftintrm_w_s(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vftintrm_l_d(a: v2f64) -> v2i64 { - __lsx_vftintrm_l_d(a) +pub fn lsx_vftintrm_l_d(a: v2f64) -> v2i64 { + unsafe { __lsx_vftintrm_l_d(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vftint_w_d(a: v2f64, b: v2f64) -> v4i32 { - __lsx_vftint_w_d(a, b) +pub fn lsx_vftint_w_d(a: v2f64, b: v2f64) -> v4i32 { + unsafe { __lsx_vftint_w_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vffint_s_l(a: v2i64, b: v2i64) -> v4f32 { - __lsx_vffint_s_l(a, b) +pub fn lsx_vffint_s_l(a: v2i64, b: v2i64) -> v4f32 { + unsafe { __lsx_vffint_s_l(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vftintrz_w_d(a: v2f64, b: v2f64) -> v4i32 { - __lsx_vftintrz_w_d(a, b) +pub fn lsx_vftintrz_w_d(a: v2f64, b: 
v2f64) -> v4i32 { + unsafe { __lsx_vftintrz_w_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vftintrp_w_d(a: v2f64, b: v2f64) -> v4i32 { - __lsx_vftintrp_w_d(a, b) +pub fn lsx_vftintrp_w_d(a: v2f64, b: v2f64) -> v4i32 { + unsafe { __lsx_vftintrp_w_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vftintrm_w_d(a: v2f64, b: v2f64) -> v4i32 { - __lsx_vftintrm_w_d(a, b) +pub fn lsx_vftintrm_w_d(a: v2f64, b: v2f64) -> v4i32 { + unsafe { __lsx_vftintrm_w_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vftintrne_w_d(a: v2f64, b: v2f64) -> v4i32 { - __lsx_vftintrne_w_d(a, b) +pub fn lsx_vftintrne_w_d(a: v2f64, b: v2f64) -> v4i32 { + unsafe { __lsx_vftintrne_w_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vftintl_l_s(a: v4f32) -> v2i64 { - __lsx_vftintl_l_s(a) +pub fn lsx_vftintl_l_s(a: v4f32) -> v2i64 { + unsafe { __lsx_vftintl_l_s(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vftinth_l_s(a: v4f32) -> v2i64 { - __lsx_vftinth_l_s(a) +pub fn lsx_vftinth_l_s(a: v4f32) -> v2i64 { + unsafe { __lsx_vftinth_l_s(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vffinth_d_w(a: v4i32) -> v2f64 { - __lsx_vffinth_d_w(a) +pub fn lsx_vffinth_d_w(a: v4i32) -> v2f64 { + unsafe { __lsx_vffinth_d_w(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vffintl_d_w(a: v4i32) -> v2f64 { - __lsx_vffintl_d_w(a) +pub fn lsx_vffintl_d_w(a: v4i32) -> v2f64 { + unsafe { __lsx_vffintl_d_w(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vftintrzl_l_s(a: v4f32) -> v2i64 { - __lsx_vftintrzl_l_s(a) +pub fn lsx_vftintrzl_l_s(a: v4f32) -> v2i64 { + unsafe { __lsx_vftintrzl_l_s(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vftintrzh_l_s(a: v4f32) -> v2i64 { - __lsx_vftintrzh_l_s(a) +pub fn lsx_vftintrzh_l_s(a: v4f32) -> v2i64 { + unsafe { __lsx_vftintrzh_l_s(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vftintrpl_l_s(a: v4f32) -> v2i64 { - __lsx_vftintrpl_l_s(a) +pub fn lsx_vftintrpl_l_s(a: v4f32) -> v2i64 { + unsafe { __lsx_vftintrpl_l_s(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vftintrph_l_s(a: v4f32) -> v2i64 { - __lsx_vftintrph_l_s(a) +pub fn lsx_vftintrph_l_s(a: v4f32) -> v2i64 { + unsafe { __lsx_vftintrph_l_s(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vftintrml_l_s(a: v4f32) -> v2i64 { - __lsx_vftintrml_l_s(a) +pub fn lsx_vftintrml_l_s(a: v4f32) -> v2i64 { + unsafe { __lsx_vftintrml_l_s(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vftintrmh_l_s(a: v4f32) -> v2i64 { - __lsx_vftintrmh_l_s(a) +pub fn lsx_vftintrmh_l_s(a: v4f32) -> v2i64 { 
+ unsafe { __lsx_vftintrmh_l_s(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vftintrnel_l_s(a: v4f32) -> v2i64 { - __lsx_vftintrnel_l_s(a) +pub fn lsx_vftintrnel_l_s(a: v4f32) -> v2i64 { + unsafe { __lsx_vftintrnel_l_s(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vftintrneh_l_s(a: v4f32) -> v2i64 { - __lsx_vftintrneh_l_s(a) +pub fn lsx_vftintrneh_l_s(a: v4f32) -> v2i64 { + unsafe { __lsx_vftintrneh_l_s(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfrintrne_s(a: v4f32) -> v4f32 { - __lsx_vfrintrne_s(a) +pub fn lsx_vfrintrne_s(a: v4f32) -> v4f32 { + unsafe { __lsx_vfrintrne_s(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfrintrne_d(a: v2f64) -> v2f64 { - __lsx_vfrintrne_d(a) +pub fn lsx_vfrintrne_d(a: v2f64) -> v2f64 { + unsafe { __lsx_vfrintrne_d(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfrintrz_s(a: v4f32) -> v4f32 { - __lsx_vfrintrz_s(a) +pub fn lsx_vfrintrz_s(a: v4f32) -> v4f32 { + unsafe { __lsx_vfrintrz_s(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfrintrz_d(a: v2f64) -> v2f64 { - __lsx_vfrintrz_d(a) +pub fn lsx_vfrintrz_d(a: v2f64) -> v2f64 { + unsafe { __lsx_vfrintrz_d(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfrintrp_s(a: v4f32) -> v4f32 { - __lsx_vfrintrp_s(a) +pub fn lsx_vfrintrp_s(a: v4f32) -> v4f32 { + unsafe { __lsx_vfrintrp_s(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfrintrp_d(a: v2f64) -> v2f64 { - __lsx_vfrintrp_d(a) +pub fn lsx_vfrintrp_d(a: v2f64) -> v2f64 { + unsafe { __lsx_vfrintrp_d(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfrintrm_s(a: v4f32) -> v4f32 { - __lsx_vfrintrm_s(a) +pub fn lsx_vfrintrm_s(a: v4f32) -> v4f32 { + unsafe { __lsx_vfrintrm_s(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfrintrm_d(a: v2f64) -> v2f64 { - __lsx_vfrintrm_d(a) +pub fn lsx_vfrintrm_d(a: v2f64) -> v2f64 { + unsafe { __lsx_vfrintrm_d(a) } } #[inline] @@ -5087,687 +5087,687 @@ pub unsafe fn lsx_vstelm_d(a: v2i64, mem_add #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vaddwev_d_w(a: v4i32, b: v4i32) -> v2i64 { - __lsx_vaddwev_d_w(a, b) +pub fn lsx_vaddwev_d_w(a: v4i32, b: v4i32) -> v2i64 { + unsafe { __lsx_vaddwev_d_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vaddwev_w_h(a: v8i16, b: v8i16) -> v4i32 { - __lsx_vaddwev_w_h(a, b) +pub fn lsx_vaddwev_w_h(a: v8i16, b: v8i16) -> v4i32 { + unsafe { __lsx_vaddwev_w_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vaddwev_h_b(a: v16i8, b: v16i8) -> v8i16 { - __lsx_vaddwev_h_b(a, b) +pub fn lsx_vaddwev_h_b(a: v16i8, b: v16i8) -> v8i16 { + 
unsafe { __lsx_vaddwev_h_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vaddwod_d_w(a: v4i32, b: v4i32) -> v2i64 { - __lsx_vaddwod_d_w(a, b) +pub fn lsx_vaddwod_d_w(a: v4i32, b: v4i32) -> v2i64 { + unsafe { __lsx_vaddwod_d_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vaddwod_w_h(a: v8i16, b: v8i16) -> v4i32 { - __lsx_vaddwod_w_h(a, b) +pub fn lsx_vaddwod_w_h(a: v8i16, b: v8i16) -> v4i32 { + unsafe { __lsx_vaddwod_w_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vaddwod_h_b(a: v16i8, b: v16i8) -> v8i16 { - __lsx_vaddwod_h_b(a, b) +pub fn lsx_vaddwod_h_b(a: v16i8, b: v16i8) -> v8i16 { + unsafe { __lsx_vaddwod_h_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vaddwev_d_wu(a: v4u32, b: v4u32) -> v2i64 { - __lsx_vaddwev_d_wu(a, b) +pub fn lsx_vaddwev_d_wu(a: v4u32, b: v4u32) -> v2i64 { + unsafe { __lsx_vaddwev_d_wu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vaddwev_w_hu(a: v8u16, b: v8u16) -> v4i32 { - __lsx_vaddwev_w_hu(a, b) +pub fn lsx_vaddwev_w_hu(a: v8u16, b: v8u16) -> v4i32 { + unsafe { __lsx_vaddwev_w_hu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vaddwev_h_bu(a: v16u8, b: v16u8) -> v8i16 { - __lsx_vaddwev_h_bu(a, b) +pub fn lsx_vaddwev_h_bu(a: v16u8, b: v16u8) -> v8i16 { + unsafe { __lsx_vaddwev_h_bu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vaddwod_d_wu(a: v4u32, b: v4u32) -> v2i64 { - __lsx_vaddwod_d_wu(a, b) +pub fn lsx_vaddwod_d_wu(a: v4u32, b: v4u32) -> v2i64 { + unsafe { __lsx_vaddwod_d_wu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vaddwod_w_hu(a: v8u16, b: v8u16) -> v4i32 { - __lsx_vaddwod_w_hu(a, b) +pub fn lsx_vaddwod_w_hu(a: v8u16, b: v8u16) -> v4i32 { + unsafe { __lsx_vaddwod_w_hu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vaddwod_h_bu(a: v16u8, b: v16u8) -> v8i16 { - __lsx_vaddwod_h_bu(a, b) +pub fn lsx_vaddwod_h_bu(a: v16u8, b: v16u8) -> v8i16 { + unsafe { __lsx_vaddwod_h_bu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vaddwev_d_wu_w(a: v4u32, b: v4i32) -> v2i64 { - __lsx_vaddwev_d_wu_w(a, b) +pub fn lsx_vaddwev_d_wu_w(a: v4u32, b: v4i32) -> v2i64 { + unsafe { __lsx_vaddwev_d_wu_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vaddwev_w_hu_h(a: v8u16, b: v8i16) -> v4i32 { - __lsx_vaddwev_w_hu_h(a, b) +pub fn lsx_vaddwev_w_hu_h(a: v8u16, b: v8i16) -> v4i32 { + unsafe { __lsx_vaddwev_w_hu_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vaddwev_h_bu_b(a: v16u8, b: v16i8) -> v8i16 { - __lsx_vaddwev_h_bu_b(a, b) +pub fn lsx_vaddwev_h_bu_b(a: v16u8, b: v16i8) -> v8i16 { + unsafe { __lsx_vaddwev_h_bu_b(a, b) } } #[inline] 
#[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vaddwod_d_wu_w(a: v4u32, b: v4i32) -> v2i64 { - __lsx_vaddwod_d_wu_w(a, b) +pub fn lsx_vaddwod_d_wu_w(a: v4u32, b: v4i32) -> v2i64 { + unsafe { __lsx_vaddwod_d_wu_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vaddwod_w_hu_h(a: v8u16, b: v8i16) -> v4i32 { - __lsx_vaddwod_w_hu_h(a, b) +pub fn lsx_vaddwod_w_hu_h(a: v8u16, b: v8i16) -> v4i32 { + unsafe { __lsx_vaddwod_w_hu_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vaddwod_h_bu_b(a: v16u8, b: v16i8) -> v8i16 { - __lsx_vaddwod_h_bu_b(a, b) +pub fn lsx_vaddwod_h_bu_b(a: v16u8, b: v16i8) -> v8i16 { + unsafe { __lsx_vaddwod_h_bu_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsubwev_d_w(a: v4i32, b: v4i32) -> v2i64 { - __lsx_vsubwev_d_w(a, b) +pub fn lsx_vsubwev_d_w(a: v4i32, b: v4i32) -> v2i64 { + unsafe { __lsx_vsubwev_d_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsubwev_w_h(a: v8i16, b: v8i16) -> v4i32 { - __lsx_vsubwev_w_h(a, b) +pub fn lsx_vsubwev_w_h(a: v8i16, b: v8i16) -> v4i32 { + unsafe { __lsx_vsubwev_w_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsubwev_h_b(a: v16i8, b: v16i8) -> v8i16 { - __lsx_vsubwev_h_b(a, b) +pub fn lsx_vsubwev_h_b(a: v16i8, b: v16i8) -> v8i16 { + unsafe { __lsx_vsubwev_h_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsubwod_d_w(a: v4i32, b: v4i32) -> v2i64 { - __lsx_vsubwod_d_w(a, b) +pub fn lsx_vsubwod_d_w(a: v4i32, b: v4i32) -> v2i64 { + unsafe { __lsx_vsubwod_d_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsubwod_w_h(a: v8i16, b: v8i16) -> v4i32 { - __lsx_vsubwod_w_h(a, b) +pub fn lsx_vsubwod_w_h(a: v8i16, b: v8i16) -> v4i32 { + unsafe { __lsx_vsubwod_w_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsubwod_h_b(a: v16i8, b: v16i8) -> v8i16 { - __lsx_vsubwod_h_b(a, b) +pub fn lsx_vsubwod_h_b(a: v16i8, b: v16i8) -> v8i16 { + unsafe { __lsx_vsubwod_h_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsubwev_d_wu(a: v4u32, b: v4u32) -> v2i64 { - __lsx_vsubwev_d_wu(a, b) +pub fn lsx_vsubwev_d_wu(a: v4u32, b: v4u32) -> v2i64 { + unsafe { __lsx_vsubwev_d_wu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsubwev_w_hu(a: v8u16, b: v8u16) -> v4i32 { - __lsx_vsubwev_w_hu(a, b) +pub fn lsx_vsubwev_w_hu(a: v8u16, b: v8u16) -> v4i32 { + unsafe { __lsx_vsubwev_w_hu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsubwev_h_bu(a: v16u8, b: v16u8) -> v8i16 { - __lsx_vsubwev_h_bu(a, b) +pub fn lsx_vsubwev_h_bu(a: v16u8, b: v16u8) -> v8i16 { + unsafe { __lsx_vsubwev_h_bu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsubwod_d_wu(a: v4u32, b: v4u32) -> v2i64 { - __lsx_vsubwod_d_wu(a, b) +pub fn lsx_vsubwod_d_wu(a: v4u32, b: v4u32) -> v2i64 { + unsafe { __lsx_vsubwod_d_wu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsubwod_w_hu(a: v8u16, b: v8u16) -> v4i32 { - __lsx_vsubwod_w_hu(a, b) +pub fn lsx_vsubwod_w_hu(a: v8u16, b: v8u16) -> v4i32 { + unsafe { __lsx_vsubwod_w_hu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsubwod_h_bu(a: v16u8, b: v16u8) -> v8i16 { - __lsx_vsubwod_h_bu(a, b) +pub fn lsx_vsubwod_h_bu(a: v16u8, b: v16u8) -> v8i16 { + unsafe { __lsx_vsubwod_h_bu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vaddwev_q_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vaddwev_q_d(a, b) +pub fn lsx_vaddwev_q_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vaddwev_q_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vaddwod_q_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vaddwod_q_d(a, b) +pub fn lsx_vaddwod_q_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vaddwod_q_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vaddwev_q_du(a: v2u64, b: v2u64) -> v2i64 { - __lsx_vaddwev_q_du(a, b) +pub fn lsx_vaddwev_q_du(a: v2u64, b: v2u64) -> v2i64 { + unsafe { __lsx_vaddwev_q_du(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vaddwod_q_du(a: v2u64, b: v2u64) -> v2i64 { - __lsx_vaddwod_q_du(a, b) +pub fn lsx_vaddwod_q_du(a: v2u64, b: v2u64) -> v2i64 { + unsafe { __lsx_vaddwod_q_du(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsubwev_q_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vsubwev_q_d(a, b) +pub fn lsx_vsubwev_q_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vsubwev_q_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsubwod_q_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vsubwod_q_d(a, b) +pub fn lsx_vsubwod_q_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vsubwod_q_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsubwev_q_du(a: v2u64, b: v2u64) -> v2i64 { - __lsx_vsubwev_q_du(a, b) +pub fn lsx_vsubwev_q_du(a: v2u64, b: v2u64) -> v2i64 { + unsafe { __lsx_vsubwev_q_du(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsubwod_q_du(a: v2u64, b: v2u64) -> v2i64 { - __lsx_vsubwod_q_du(a, b) +pub fn lsx_vsubwod_q_du(a: v2u64, b: v2u64) -> v2i64 { + unsafe { __lsx_vsubwod_q_du(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vaddwev_q_du_d(a: v2u64, b: v2i64) -> v2i64 { - __lsx_vaddwev_q_du_d(a, b) +pub fn lsx_vaddwev_q_du_d(a: v2u64, b: v2i64) -> v2i64 { + unsafe { __lsx_vaddwev_q_du_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn 
lsx_vaddwod_q_du_d(a: v2u64, b: v2i64) -> v2i64 { - __lsx_vaddwod_q_du_d(a, b) +pub fn lsx_vaddwod_q_du_d(a: v2u64, b: v2i64) -> v2i64 { + unsafe { __lsx_vaddwod_q_du_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmulwev_d_w(a: v4i32, b: v4i32) -> v2i64 { - __lsx_vmulwev_d_w(a, b) +pub fn lsx_vmulwev_d_w(a: v4i32, b: v4i32) -> v2i64 { + unsafe { __lsx_vmulwev_d_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmulwev_w_h(a: v8i16, b: v8i16) -> v4i32 { - __lsx_vmulwev_w_h(a, b) +pub fn lsx_vmulwev_w_h(a: v8i16, b: v8i16) -> v4i32 { + unsafe { __lsx_vmulwev_w_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmulwev_h_b(a: v16i8, b: v16i8) -> v8i16 { - __lsx_vmulwev_h_b(a, b) +pub fn lsx_vmulwev_h_b(a: v16i8, b: v16i8) -> v8i16 { + unsafe { __lsx_vmulwev_h_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmulwod_d_w(a: v4i32, b: v4i32) -> v2i64 { - __lsx_vmulwod_d_w(a, b) +pub fn lsx_vmulwod_d_w(a: v4i32, b: v4i32) -> v2i64 { + unsafe { __lsx_vmulwod_d_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmulwod_w_h(a: v8i16, b: v8i16) -> v4i32 { - __lsx_vmulwod_w_h(a, b) +pub fn lsx_vmulwod_w_h(a: v8i16, b: v8i16) -> v4i32 { + unsafe { __lsx_vmulwod_w_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmulwod_h_b(a: v16i8, b: v16i8) -> v8i16 { - __lsx_vmulwod_h_b(a, b) +pub fn lsx_vmulwod_h_b(a: v16i8, b: v16i8) -> v8i16 { + unsafe { __lsx_vmulwod_h_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmulwev_d_wu(a: v4u32, b: v4u32) -> v2i64 { - __lsx_vmulwev_d_wu(a, b) +pub fn lsx_vmulwev_d_wu(a: v4u32, b: v4u32) -> v2i64 { + unsafe { __lsx_vmulwev_d_wu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmulwev_w_hu(a: v8u16, b: v8u16) -> v4i32 { - __lsx_vmulwev_w_hu(a, b) +pub fn lsx_vmulwev_w_hu(a: v8u16, b: v8u16) -> v4i32 { + unsafe { __lsx_vmulwev_w_hu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmulwev_h_bu(a: v16u8, b: v16u8) -> v8i16 { - __lsx_vmulwev_h_bu(a, b) +pub fn lsx_vmulwev_h_bu(a: v16u8, b: v16u8) -> v8i16 { + unsafe { __lsx_vmulwev_h_bu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmulwod_d_wu(a: v4u32, b: v4u32) -> v2i64 { - __lsx_vmulwod_d_wu(a, b) +pub fn lsx_vmulwod_d_wu(a: v4u32, b: v4u32) -> v2i64 { + unsafe { __lsx_vmulwod_d_wu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmulwod_w_hu(a: v8u16, b: v8u16) -> v4i32 { - __lsx_vmulwod_w_hu(a, b) +pub fn lsx_vmulwod_w_hu(a: v8u16, b: v8u16) -> v4i32 { + unsafe { __lsx_vmulwod_w_hu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmulwod_h_bu(a: v16u8, b: v16u8) -> v8i16 { - __lsx_vmulwod_h_bu(a, b) 
+pub fn lsx_vmulwod_h_bu(a: v16u8, b: v16u8) -> v8i16 { + unsafe { __lsx_vmulwod_h_bu(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmulwev_d_wu_w(a: v4u32, b: v4i32) -> v2i64 { - __lsx_vmulwev_d_wu_w(a, b) +pub fn lsx_vmulwev_d_wu_w(a: v4u32, b: v4i32) -> v2i64 { + unsafe { __lsx_vmulwev_d_wu_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmulwev_w_hu_h(a: v8u16, b: v8i16) -> v4i32 { - __lsx_vmulwev_w_hu_h(a, b) +pub fn lsx_vmulwev_w_hu_h(a: v8u16, b: v8i16) -> v4i32 { + unsafe { __lsx_vmulwev_w_hu_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmulwev_h_bu_b(a: v16u8, b: v16i8) -> v8i16 { - __lsx_vmulwev_h_bu_b(a, b) +pub fn lsx_vmulwev_h_bu_b(a: v16u8, b: v16i8) -> v8i16 { + unsafe { __lsx_vmulwev_h_bu_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmulwod_d_wu_w(a: v4u32, b: v4i32) -> v2i64 { - __lsx_vmulwod_d_wu_w(a, b) +pub fn lsx_vmulwod_d_wu_w(a: v4u32, b: v4i32) -> v2i64 { + unsafe { __lsx_vmulwod_d_wu_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmulwod_w_hu_h(a: v8u16, b: v8i16) -> v4i32 { - __lsx_vmulwod_w_hu_h(a, b) +pub fn lsx_vmulwod_w_hu_h(a: v8u16, b: v8i16) -> v4i32 { + unsafe { __lsx_vmulwod_w_hu_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmulwod_h_bu_b(a: v16u8, b: v16i8) -> v8i16 { - __lsx_vmulwod_h_bu_b(a, b) +pub fn lsx_vmulwod_h_bu_b(a: v16u8, b: v16i8) -> v8i16 { + unsafe { __lsx_vmulwod_h_bu_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmulwev_q_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vmulwev_q_d(a, b) +pub fn lsx_vmulwev_q_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vmulwev_q_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmulwod_q_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vmulwod_q_d(a, b) +pub fn lsx_vmulwod_q_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vmulwod_q_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmulwev_q_du(a: v2u64, b: v2u64) -> v2i64 { - __lsx_vmulwev_q_du(a, b) +pub fn lsx_vmulwev_q_du(a: v2u64, b: v2u64) -> v2i64 { + unsafe { __lsx_vmulwev_q_du(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmulwod_q_du(a: v2u64, b: v2u64) -> v2i64 { - __lsx_vmulwod_q_du(a, b) +pub fn lsx_vmulwod_q_du(a: v2u64, b: v2u64) -> v2i64 { + unsafe { __lsx_vmulwod_q_du(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmulwev_q_du_d(a: v2u64, b: v2i64) -> v2i64 { - __lsx_vmulwev_q_du_d(a, b) +pub fn lsx_vmulwev_q_du_d(a: v2u64, b: v2i64) -> v2i64 { + unsafe { __lsx_vmulwev_q_du_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmulwod_q_du_d(a: v2u64, b: v2i64) -> v2i64 { - __lsx_vmulwod_q_du_d(a, b) +pub fn 
lsx_vmulwod_q_du_d(a: v2u64, b: v2i64) -> v2i64 { + unsafe { __lsx_vmulwod_q_du_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vhaddw_q_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vhaddw_q_d(a, b) +pub fn lsx_vhaddw_q_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vhaddw_q_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vhaddw_qu_du(a: v2u64, b: v2u64) -> v2u64 { - __lsx_vhaddw_qu_du(a, b) +pub fn lsx_vhaddw_qu_du(a: v2u64, b: v2u64) -> v2u64 { + unsafe { __lsx_vhaddw_qu_du(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vhsubw_q_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vhsubw_q_d(a, b) +pub fn lsx_vhsubw_q_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vhsubw_q_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vhsubw_qu_du(a: v2u64, b: v2u64) -> v2u64 { - __lsx_vhsubw_qu_du(a, b) +pub fn lsx_vhsubw_qu_du(a: v2u64, b: v2u64) -> v2u64 { + unsafe { __lsx_vhsubw_qu_du(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaddwev_d_w(a: v2i64, b: v4i32, c: v4i32) -> v2i64 { - __lsx_vmaddwev_d_w(a, b, c) +pub fn lsx_vmaddwev_d_w(a: v2i64, b: v4i32, c: v4i32) -> v2i64 { + unsafe { __lsx_vmaddwev_d_w(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaddwev_w_h(a: v4i32, b: v8i16, c: v8i16) -> v4i32 { - __lsx_vmaddwev_w_h(a, b, c) +pub fn lsx_vmaddwev_w_h(a: v4i32, b: v8i16, c: v8i16) -> v4i32 { + unsafe { __lsx_vmaddwev_w_h(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaddwev_h_b(a: v8i16, b: v16i8, c: v16i8) -> v8i16 { - __lsx_vmaddwev_h_b(a, b, c) +pub fn lsx_vmaddwev_h_b(a: v8i16, b: v16i8, c: v16i8) -> v8i16 { + unsafe { __lsx_vmaddwev_h_b(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaddwev_d_wu(a: v2u64, b: v4u32, c: v4u32) -> v2u64 { - __lsx_vmaddwev_d_wu(a, b, c) +pub fn lsx_vmaddwev_d_wu(a: v2u64, b: v4u32, c: v4u32) -> v2u64 { + unsafe { __lsx_vmaddwev_d_wu(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaddwev_w_hu(a: v4u32, b: v8u16, c: v8u16) -> v4u32 { - __lsx_vmaddwev_w_hu(a, b, c) +pub fn lsx_vmaddwev_w_hu(a: v4u32, b: v8u16, c: v8u16) -> v4u32 { + unsafe { __lsx_vmaddwev_w_hu(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaddwev_h_bu(a: v8u16, b: v16u8, c: v16u8) -> v8u16 { - __lsx_vmaddwev_h_bu(a, b, c) +pub fn lsx_vmaddwev_h_bu(a: v8u16, b: v16u8, c: v16u8) -> v8u16 { + unsafe { __lsx_vmaddwev_h_bu(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaddwod_d_w(a: v2i64, b: v4i32, c: v4i32) -> v2i64 { - __lsx_vmaddwod_d_w(a, b, c) +pub fn lsx_vmaddwod_d_w(a: v2i64, b: v4i32, c: v4i32) -> v2i64 { + unsafe { __lsx_vmaddwod_d_w(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaddwod_w_h(a: v4i32, b: v8i16, c: v8i16) -> v4i32 { - __lsx_vmaddwod_w_h(a, b, c) +pub fn lsx_vmaddwod_w_h(a: v4i32, b: v8i16, c: v8i16) -> v4i32 { + unsafe { __lsx_vmaddwod_w_h(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaddwod_h_b(a: v8i16, b: v16i8, c: v16i8) -> v8i16 { - __lsx_vmaddwod_h_b(a, b, c) +pub fn lsx_vmaddwod_h_b(a: v8i16, b: v16i8, c: v16i8) -> v8i16 { + unsafe { __lsx_vmaddwod_h_b(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaddwod_d_wu(a: v2u64, b: v4u32, c: v4u32) -> v2u64 { - __lsx_vmaddwod_d_wu(a, b, c) +pub fn lsx_vmaddwod_d_wu(a: v2u64, b: v4u32, c: v4u32) -> v2u64 { + unsafe { __lsx_vmaddwod_d_wu(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaddwod_w_hu(a: v4u32, b: v8u16, c: v8u16) -> v4u32 { - __lsx_vmaddwod_w_hu(a, b, c) +pub fn lsx_vmaddwod_w_hu(a: v4u32, b: v8u16, c: v8u16) -> v4u32 { + unsafe { __lsx_vmaddwod_w_hu(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaddwod_h_bu(a: v8u16, b: v16u8, c: v16u8) -> v8u16 { - __lsx_vmaddwod_h_bu(a, b, c) +pub fn lsx_vmaddwod_h_bu(a: v8u16, b: v16u8, c: v16u8) -> v8u16 { + unsafe { __lsx_vmaddwod_h_bu(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaddwev_d_wu_w(a: v2i64, b: v4u32, c: v4i32) -> v2i64 { - __lsx_vmaddwev_d_wu_w(a, b, c) +pub fn lsx_vmaddwev_d_wu_w(a: v2i64, b: v4u32, c: v4i32) -> v2i64 { + unsafe { __lsx_vmaddwev_d_wu_w(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaddwev_w_hu_h(a: v4i32, b: v8u16, c: v8i16) -> v4i32 { - __lsx_vmaddwev_w_hu_h(a, b, c) +pub fn lsx_vmaddwev_w_hu_h(a: v4i32, b: v8u16, c: v8i16) -> v4i32 { + unsafe { __lsx_vmaddwev_w_hu_h(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaddwev_h_bu_b(a: v8i16, b: v16u8, c: v16i8) -> v8i16 { - __lsx_vmaddwev_h_bu_b(a, b, c) +pub fn lsx_vmaddwev_h_bu_b(a: v8i16, b: v16u8, c: v16i8) -> v8i16 { + unsafe { __lsx_vmaddwev_h_bu_b(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaddwod_d_wu_w(a: v2i64, b: v4u32, c: v4i32) -> v2i64 { - __lsx_vmaddwod_d_wu_w(a, b, c) +pub fn lsx_vmaddwod_d_wu_w(a: v2i64, b: v4u32, c: v4i32) -> v2i64 { + unsafe { __lsx_vmaddwod_d_wu_w(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaddwod_w_hu_h(a: v4i32, b: v8u16, c: v8i16) -> v4i32 { - __lsx_vmaddwod_w_hu_h(a, b, c) +pub fn lsx_vmaddwod_w_hu_h(a: v4i32, b: v8u16, c: v8i16) -> v4i32 { + unsafe { __lsx_vmaddwod_w_hu_h(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaddwod_h_bu_b(a: v8i16, b: v16u8, c: v16i8) -> v8i16 { - __lsx_vmaddwod_h_bu_b(a, b, c) +pub fn lsx_vmaddwod_h_bu_b(a: v8i16, b: v16u8, c: v16i8) -> v8i16 { + unsafe { __lsx_vmaddwod_h_bu_b(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaddwev_q_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 { - __lsx_vmaddwev_q_d(a, b, c) +pub fn lsx_vmaddwev_q_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 { + unsafe { __lsx_vmaddwev_q_d(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaddwod_q_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 { - __lsx_vmaddwod_q_d(a, b, c) +pub fn lsx_vmaddwod_q_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 { + unsafe { __lsx_vmaddwod_q_d(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaddwev_q_du(a: v2u64, b: v2u64, c: v2u64) -> v2u64 { - __lsx_vmaddwev_q_du(a, b, c) +pub fn lsx_vmaddwev_q_du(a: v2u64, b: v2u64, c: v2u64) -> v2u64 { + unsafe { __lsx_vmaddwev_q_du(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaddwod_q_du(a: v2u64, b: v2u64, c: v2u64) -> v2u64 { - __lsx_vmaddwod_q_du(a, b, c) +pub fn lsx_vmaddwod_q_du(a: v2u64, b: v2u64, c: v2u64) -> v2u64 { + unsafe { __lsx_vmaddwod_q_du(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaddwev_q_du_d(a: v2i64, b: v2u64, c: v2i64) -> v2i64 { - __lsx_vmaddwev_q_du_d(a, b, c) +pub fn lsx_vmaddwev_q_du_d(a: v2i64, b: v2u64, c: v2i64) -> v2i64 { + unsafe { __lsx_vmaddwev_q_du_d(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmaddwod_q_du_d(a: v2i64, b: v2u64, c: v2i64) -> v2i64 { - __lsx_vmaddwod_q_du_d(a, b, c) +pub fn lsx_vmaddwod_q_du_d(a: v2i64, b: v2u64, c: v2i64) -> v2i64 { + unsafe { __lsx_vmaddwod_q_du_d(a, b, c) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vrotr_b(a: v16i8, b: v16i8) -> v16i8 { - __lsx_vrotr_b(a, b) +pub fn lsx_vrotr_b(a: v16i8, b: v16i8) -> v16i8 { + unsafe { __lsx_vrotr_b(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vrotr_h(a: v8i16, b: v8i16) -> v8i16 { - __lsx_vrotr_h(a, b) +pub fn lsx_vrotr_h(a: v8i16, b: v8i16) -> v8i16 { + unsafe { __lsx_vrotr_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vrotr_w(a: v4i32, b: v4i32) -> v4i32 { - __lsx_vrotr_w(a, b) +pub fn lsx_vrotr_w(a: v4i32, b: v4i32) -> v4i32 { + unsafe { __lsx_vrotr_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vrotr_d(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vrotr_d(a, b) +pub fn lsx_vrotr_d(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vrotr_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vadd_q(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vadd_q(a, b) +pub fn lsx_vadd_q(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vadd_q(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsub_q(a: v2i64, b: v2i64) -> v2i64 { - __lsx_vsub_q(a, b) +pub fn lsx_vsub_q(a: v2i64, b: v2i64) -> v2i64 { + unsafe { __lsx_vsub_q(a, b) } } #[inline] @@ -5809,555 +5809,555 @@ pub unsafe 
fn lsx_vldrepl_d(mem_addr: *const i8) -> v2i64 { #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmskgez_b(a: v16i8) -> v16i8 { - __lsx_vmskgez_b(a) +pub fn lsx_vmskgez_b(a: v16i8) -> v16i8 { + unsafe { __lsx_vmskgez_b(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vmsknz_b(a: v16i8) -> v16i8 { - __lsx_vmsknz_b(a) +pub fn lsx_vmsknz_b(a: v16i8) -> v16i8 { + unsafe { __lsx_vmsknz_b(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vexth_h_b(a: v16i8) -> v8i16 { - __lsx_vexth_h_b(a) +pub fn lsx_vexth_h_b(a: v16i8) -> v8i16 { + unsafe { __lsx_vexth_h_b(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vexth_w_h(a: v8i16) -> v4i32 { - __lsx_vexth_w_h(a) +pub fn lsx_vexth_w_h(a: v8i16) -> v4i32 { + unsafe { __lsx_vexth_w_h(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vexth_d_w(a: v4i32) -> v2i64 { - __lsx_vexth_d_w(a) +pub fn lsx_vexth_d_w(a: v4i32) -> v2i64 { + unsafe { __lsx_vexth_d_w(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vexth_q_d(a: v2i64) -> v2i64 { - __lsx_vexth_q_d(a) +pub fn lsx_vexth_q_d(a: v2i64) -> v2i64 { + unsafe { __lsx_vexth_q_d(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vexth_hu_bu(a: v16u8) -> v8u16 { - __lsx_vexth_hu_bu(a) +pub fn lsx_vexth_hu_bu(a: v16u8) -> v8u16 { + unsafe { __lsx_vexth_hu_bu(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vexth_wu_hu(a: v8u16) -> v4u32 { - __lsx_vexth_wu_hu(a) +pub fn lsx_vexth_wu_hu(a: v8u16) -> v4u32 { + unsafe { __lsx_vexth_wu_hu(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vexth_du_wu(a: v4u32) -> v2u64 { - __lsx_vexth_du_wu(a) +pub fn lsx_vexth_du_wu(a: v4u32) -> v2u64 { + unsafe { __lsx_vexth_du_wu(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vexth_qu_du(a: v2u64) -> v2u64 { - __lsx_vexth_qu_du(a) +pub fn lsx_vexth_qu_du(a: v2u64) -> v2u64 { + unsafe { __lsx_vexth_qu_du(a) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vrotri_b(a: v16i8) -> v16i8 { +pub fn lsx_vrotri_b(a: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM3, 3); - __lsx_vrotri_b(a, IMM3) + unsafe { __lsx_vrotri_b(a, IMM3) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vrotri_h(a: v8i16) -> v8i16 { +pub fn lsx_vrotri_h(a: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM4, 4); - __lsx_vrotri_h(a, IMM4) + unsafe { __lsx_vrotri_h(a, IMM4) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vrotri_w(a: v4i32) -> v4i32 { +pub fn lsx_vrotri_w(a: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vrotri_w(a, 
IMM5) + unsafe { __lsx_vrotri_w(a, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vrotri_d(a: v2i64) -> v2i64 { +pub fn lsx_vrotri_d(a: v2i64) -> v2i64 { static_assert_uimm_bits!(IMM6, 6); - __lsx_vrotri_d(a, IMM6) + unsafe { __lsx_vrotri_d(a, IMM6) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vextl_q_d(a: v2i64) -> v2i64 { - __lsx_vextl_q_d(a) +pub fn lsx_vextl_q_d(a: v2i64) -> v2i64 { + unsafe { __lsx_vextl_q_d(a) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrlni_b_h(a: v16i8, b: v16i8) -> v16i8 { +pub fn lsx_vsrlni_b_h(a: v16i8, b: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM4, 4); - __lsx_vsrlni_b_h(a, b, IMM4) + unsafe { __lsx_vsrlni_b_h(a, b, IMM4) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrlni_h_w(a: v8i16, b: v8i16) -> v8i16 { +pub fn lsx_vsrlni_h_w(a: v8i16, b: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vsrlni_h_w(a, b, IMM5) + unsafe { __lsx_vsrlni_h_w(a, b, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrlni_w_d(a: v4i32, b: v4i32) -> v4i32 { +pub fn lsx_vsrlni_w_d(a: v4i32, b: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM6, 6); - __lsx_vsrlni_w_d(a, b, IMM6) + unsafe { __lsx_vsrlni_w_d(a, b, IMM6) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrlni_d_q(a: v2i64, b: v2i64) -> v2i64 { +pub fn lsx_vsrlni_d_q(a: v2i64, b: v2i64) -> v2i64 { static_assert_uimm_bits!(IMM7, 7); - __lsx_vsrlni_d_q(a, b, IMM7) + unsafe { __lsx_vsrlni_d_q(a, b, IMM7) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrlrni_b_h(a: v16i8, b: v16i8) -> v16i8 { +pub fn lsx_vsrlrni_b_h(a: v16i8, b: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM4, 4); - __lsx_vsrlrni_b_h(a, b, IMM4) + unsafe { __lsx_vsrlrni_b_h(a, b, IMM4) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrlrni_h_w(a: v8i16, b: v8i16) -> v8i16 { +pub fn lsx_vsrlrni_h_w(a: v8i16, b: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vsrlrni_h_w(a, b, IMM5) + unsafe { __lsx_vsrlrni_h_w(a, b, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrlrni_w_d(a: v4i32, b: v4i32) -> v4i32 { +pub fn lsx_vsrlrni_w_d(a: v4i32, b: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM6, 6); - __lsx_vsrlrni_w_d(a, b, IMM6) + unsafe { __lsx_vsrlrni_w_d(a, b, IMM6) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrlrni_d_q(a: v2i64, b: v2i64) -> v2i64 { +pub fn lsx_vsrlrni_d_q(a: v2i64, b: v2i64) -> v2i64 { static_assert_uimm_bits!(IMM7, 7); - __lsx_vsrlrni_d_q(a, b, IMM7) + unsafe { __lsx_vsrlrni_d_q(a, b, IMM7) } 
} #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrlni_b_h(a: v16i8, b: v16i8) -> v16i8 { +pub fn lsx_vssrlni_b_h(a: v16i8, b: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM4, 4); - __lsx_vssrlni_b_h(a, b, IMM4) + unsafe { __lsx_vssrlni_b_h(a, b, IMM4) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrlni_h_w(a: v8i16, b: v8i16) -> v8i16 { +pub fn lsx_vssrlni_h_w(a: v8i16, b: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vssrlni_h_w(a, b, IMM5) + unsafe { __lsx_vssrlni_h_w(a, b, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrlni_w_d(a: v4i32, b: v4i32) -> v4i32 { +pub fn lsx_vssrlni_w_d(a: v4i32, b: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM6, 6); - __lsx_vssrlni_w_d(a, b, IMM6) + unsafe { __lsx_vssrlni_w_d(a, b, IMM6) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrlni_d_q(a: v2i64, b: v2i64) -> v2i64 { +pub fn lsx_vssrlni_d_q(a: v2i64, b: v2i64) -> v2i64 { static_assert_uimm_bits!(IMM7, 7); - __lsx_vssrlni_d_q(a, b, IMM7) + unsafe { __lsx_vssrlni_d_q(a, b, IMM7) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrlni_bu_h(a: v16u8, b: v16i8) -> v16u8 { +pub fn lsx_vssrlni_bu_h(a: v16u8, b: v16i8) -> v16u8 { static_assert_uimm_bits!(IMM4, 4); - __lsx_vssrlni_bu_h(a, b, IMM4) + unsafe { __lsx_vssrlni_bu_h(a, b, IMM4) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrlni_hu_w(a: v8u16, b: v8i16) -> v8u16 { +pub fn lsx_vssrlni_hu_w(a: v8u16, b: v8i16) -> v8u16 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vssrlni_hu_w(a, b, IMM5) + unsafe { __lsx_vssrlni_hu_w(a, b, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrlni_wu_d(a: v4u32, b: v4i32) -> v4u32 { +pub fn lsx_vssrlni_wu_d(a: v4u32, b: v4i32) -> v4u32 { static_assert_uimm_bits!(IMM6, 6); - __lsx_vssrlni_wu_d(a, b, IMM6) + unsafe { __lsx_vssrlni_wu_d(a, b, IMM6) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrlni_du_q(a: v2u64, b: v2i64) -> v2u64 { +pub fn lsx_vssrlni_du_q(a: v2u64, b: v2i64) -> v2u64 { static_assert_uimm_bits!(IMM7, 7); - __lsx_vssrlni_du_q(a, b, IMM7) + unsafe { __lsx_vssrlni_du_q(a, b, IMM7) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrlrni_b_h(a: v16i8, b: v16i8) -> v16i8 { +pub fn lsx_vssrlrni_b_h(a: v16i8, b: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM4, 4); - __lsx_vssrlrni_b_h(a, b, IMM4) + unsafe { __lsx_vssrlrni_b_h(a, b, IMM4) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrlrni_h_w(a: v8i16, b: v8i16) -> v8i16 { +pub fn 
lsx_vssrlrni_h_w(a: v8i16, b: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vssrlrni_h_w(a, b, IMM5) + unsafe { __lsx_vssrlrni_h_w(a, b, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrlrni_w_d(a: v4i32, b: v4i32) -> v4i32 { +pub fn lsx_vssrlrni_w_d(a: v4i32, b: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM6, 6); - __lsx_vssrlrni_w_d(a, b, IMM6) + unsafe { __lsx_vssrlrni_w_d(a, b, IMM6) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrlrni_d_q(a: v2i64, b: v2i64) -> v2i64 { +pub fn lsx_vssrlrni_d_q(a: v2i64, b: v2i64) -> v2i64 { static_assert_uimm_bits!(IMM7, 7); - __lsx_vssrlrni_d_q(a, b, IMM7) + unsafe { __lsx_vssrlrni_d_q(a, b, IMM7) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrlrni_bu_h(a: v16u8, b: v16i8) -> v16u8 { +pub fn lsx_vssrlrni_bu_h(a: v16u8, b: v16i8) -> v16u8 { static_assert_uimm_bits!(IMM4, 4); - __lsx_vssrlrni_bu_h(a, b, IMM4) + unsafe { __lsx_vssrlrni_bu_h(a, b, IMM4) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrlrni_hu_w(a: v8u16, b: v8i16) -> v8u16 { +pub fn lsx_vssrlrni_hu_w(a: v8u16, b: v8i16) -> v8u16 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vssrlrni_hu_w(a, b, IMM5) + unsafe { __lsx_vssrlrni_hu_w(a, b, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrlrni_wu_d(a: v4u32, b: v4i32) -> v4u32 { +pub fn lsx_vssrlrni_wu_d(a: v4u32, b: v4i32) -> v4u32 { static_assert_uimm_bits!(IMM6, 6); - __lsx_vssrlrni_wu_d(a, b, IMM6) + unsafe { __lsx_vssrlrni_wu_d(a, b, IMM6) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrlrni_du_q(a: v2u64, b: v2i64) -> v2u64 { +pub fn lsx_vssrlrni_du_q(a: v2u64, b: v2i64) -> v2u64 { static_assert_uimm_bits!(IMM7, 7); - __lsx_vssrlrni_du_q(a, b, IMM7) + unsafe { __lsx_vssrlrni_du_q(a, b, IMM7) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrani_b_h(a: v16i8, b: v16i8) -> v16i8 { +pub fn lsx_vsrani_b_h(a: v16i8, b: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM4, 4); - __lsx_vsrani_b_h(a, b, IMM4) + unsafe { __lsx_vsrani_b_h(a, b, IMM4) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrani_h_w(a: v8i16, b: v8i16) -> v8i16 { +pub fn lsx_vsrani_h_w(a: v8i16, b: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vsrani_h_w(a, b, IMM5) + unsafe { __lsx_vsrani_h_w(a, b, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrani_w_d(a: v4i32, b: v4i32) -> v4i32 { +pub fn lsx_vsrani_w_d(a: v4i32, b: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM6, 6); - __lsx_vsrani_w_d(a, b, IMM6) + unsafe { __lsx_vsrani_w_d(a, b, IMM6) } } #[inline] #[target_feature(enable = "lsx")] 
#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrani_d_q(a: v2i64, b: v2i64) -> v2i64 { +pub fn lsx_vsrani_d_q(a: v2i64, b: v2i64) -> v2i64 { static_assert_uimm_bits!(IMM7, 7); - __lsx_vsrani_d_q(a, b, IMM7) + unsafe { __lsx_vsrani_d_q(a, b, IMM7) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrarni_b_h(a: v16i8, b: v16i8) -> v16i8 { +pub fn lsx_vsrarni_b_h(a: v16i8, b: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM4, 4); - __lsx_vsrarni_b_h(a, b, IMM4) + unsafe { __lsx_vsrarni_b_h(a, b, IMM4) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrarni_h_w(a: v8i16, b: v8i16) -> v8i16 { +pub fn lsx_vsrarni_h_w(a: v8i16, b: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vsrarni_h_w(a, b, IMM5) + unsafe { __lsx_vsrarni_h_w(a, b, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrarni_w_d(a: v4i32, b: v4i32) -> v4i32 { +pub fn lsx_vsrarni_w_d(a: v4i32, b: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM6, 6); - __lsx_vsrarni_w_d(a, b, IMM6) + unsafe { __lsx_vsrarni_w_d(a, b, IMM6) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vsrarni_d_q(a: v2i64, b: v2i64) -> v2i64 { +pub fn lsx_vsrarni_d_q(a: v2i64, b: v2i64) -> v2i64 { static_assert_uimm_bits!(IMM7, 7); - __lsx_vsrarni_d_q(a, b, IMM7) + unsafe { __lsx_vsrarni_d_q(a, b, IMM7) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrani_b_h(a: v16i8, b: v16i8) -> v16i8 { +pub fn lsx_vssrani_b_h(a: v16i8, b: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM4, 4); - __lsx_vssrani_b_h(a, b, IMM4) + unsafe { __lsx_vssrani_b_h(a, b, IMM4) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrani_h_w(a: v8i16, b: v8i16) -> v8i16 { +pub fn lsx_vssrani_h_w(a: v8i16, b: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vssrani_h_w(a, b, IMM5) + unsafe { __lsx_vssrani_h_w(a, b, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrani_w_d(a: v4i32, b: v4i32) -> v4i32 { +pub fn lsx_vssrani_w_d(a: v4i32, b: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM6, 6); - __lsx_vssrani_w_d(a, b, IMM6) + unsafe { __lsx_vssrani_w_d(a, b, IMM6) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrani_d_q(a: v2i64, b: v2i64) -> v2i64 { +pub fn lsx_vssrani_d_q(a: v2i64, b: v2i64) -> v2i64 { static_assert_uimm_bits!(IMM7, 7); - __lsx_vssrani_d_q(a, b, IMM7) + unsafe { __lsx_vssrani_d_q(a, b, IMM7) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrani_bu_h(a: v16u8, b: v16i8) -> v16u8 { +pub fn lsx_vssrani_bu_h(a: v16u8, b: v16i8) -> v16u8 { static_assert_uimm_bits!(IMM4, 
4); - __lsx_vssrani_bu_h(a, b, IMM4) + unsafe { __lsx_vssrani_bu_h(a, b, IMM4) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrani_hu_w(a: v8u16, b: v8i16) -> v8u16 { +pub fn lsx_vssrani_hu_w(a: v8u16, b: v8i16) -> v8u16 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vssrani_hu_w(a, b, IMM5) + unsafe { __lsx_vssrani_hu_w(a, b, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrani_wu_d(a: v4u32, b: v4i32) -> v4u32 { +pub fn lsx_vssrani_wu_d(a: v4u32, b: v4i32) -> v4u32 { static_assert_uimm_bits!(IMM6, 6); - __lsx_vssrani_wu_d(a, b, IMM6) + unsafe { __lsx_vssrani_wu_d(a, b, IMM6) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrani_du_q(a: v2u64, b: v2i64) -> v2u64 { +pub fn lsx_vssrani_du_q(a: v2u64, b: v2i64) -> v2u64 { static_assert_uimm_bits!(IMM7, 7); - __lsx_vssrani_du_q(a, b, IMM7) + unsafe { __lsx_vssrani_du_q(a, b, IMM7) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrarni_b_h(a: v16i8, b: v16i8) -> v16i8 { +pub fn lsx_vssrarni_b_h(a: v16i8, b: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM4, 4); - __lsx_vssrarni_b_h(a, b, IMM4) + unsafe { __lsx_vssrarni_b_h(a, b, IMM4) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrarni_h_w(a: v8i16, b: v8i16) -> v8i16 { +pub fn lsx_vssrarni_h_w(a: v8i16, b: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vssrarni_h_w(a, b, IMM5) + unsafe { __lsx_vssrarni_h_w(a, b, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrarni_w_d(a: v4i32, b: v4i32) -> v4i32 { +pub fn lsx_vssrarni_w_d(a: v4i32, b: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM6, 6); - __lsx_vssrarni_w_d(a, b, IMM6) + unsafe { __lsx_vssrarni_w_d(a, b, IMM6) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrarni_d_q(a: v2i64, b: v2i64) -> v2i64 { +pub fn lsx_vssrarni_d_q(a: v2i64, b: v2i64) -> v2i64 { static_assert_uimm_bits!(IMM7, 7); - __lsx_vssrarni_d_q(a, b, IMM7) + unsafe { __lsx_vssrarni_d_q(a, b, IMM7) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrarni_bu_h(a: v16u8, b: v16i8) -> v16u8 { +pub fn lsx_vssrarni_bu_h(a: v16u8, b: v16i8) -> v16u8 { static_assert_uimm_bits!(IMM4, 4); - __lsx_vssrarni_bu_h(a, b, IMM4) + unsafe { __lsx_vssrarni_bu_h(a, b, IMM4) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrarni_hu_w(a: v8u16, b: v8i16) -> v8u16 { +pub fn lsx_vssrarni_hu_w(a: v8u16, b: v8i16) -> v8u16 { static_assert_uimm_bits!(IMM5, 5); - __lsx_vssrarni_hu_w(a, b, IMM5) + unsafe { __lsx_vssrarni_hu_w(a, b, IMM5) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrarni_wu_d(a: v4u32, b: v4i32) -> v4u32 { +pub fn lsx_vssrarni_wu_d(a: v4u32, b: v4i32) -> v4u32 { static_assert_uimm_bits!(IMM6, 6); - __lsx_vssrarni_wu_d(a, b, IMM6) + unsafe { __lsx_vssrarni_wu_d(a, b, IMM6) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrarni_du_q(a: v2u64, b: v2i64) -> v2u64 { +pub fn lsx_vssrarni_du_q(a: v2u64, b: v2i64) -> v2u64 { static_assert_uimm_bits!(IMM7, 7); - __lsx_vssrarni_du_q(a, b, IMM7) + unsafe { __lsx_vssrarni_du_q(a, b, IMM7) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vpermi_w(a: v4i32, b: v4i32) -> v4i32 { +pub fn lsx_vpermi_w(a: v4i32, b: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM8, 8); - __lsx_vpermi_w(a, b, IMM8) + unsafe { __lsx_vpermi_w(a, b, IMM8) } } #[inline] @@ -6381,66 +6381,66 @@ pub unsafe fn lsx_vst(a: v16i8, mem_addr: *mut i8) { #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrlrn_b_h(a: v8i16, b: v8i16) -> v16i8 { - __lsx_vssrlrn_b_h(a, b) +pub fn lsx_vssrlrn_b_h(a: v8i16, b: v8i16) -> v16i8 { + unsafe { __lsx_vssrlrn_b_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrlrn_h_w(a: v4i32, b: v4i32) -> v8i16 { - __lsx_vssrlrn_h_w(a, b) +pub fn lsx_vssrlrn_h_w(a: v4i32, b: v4i32) -> v8i16 { + unsafe { __lsx_vssrlrn_h_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrlrn_w_d(a: v2i64, b: v2i64) -> v4i32 { - __lsx_vssrlrn_w_d(a, b) +pub fn lsx_vssrlrn_w_d(a: v2i64, b: v2i64) -> v4i32 { + unsafe { __lsx_vssrlrn_w_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrln_b_h(a: v8i16, b: v8i16) -> v16i8 { - __lsx_vssrln_b_h(a, b) +pub fn lsx_vssrln_b_h(a: v8i16, b: v8i16) -> v16i8 { + unsafe { __lsx_vssrln_b_h(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrln_h_w(a: v4i32, b: v4i32) -> v8i16 { - __lsx_vssrln_h_w(a, b) +pub fn lsx_vssrln_h_w(a: v4i32, b: v4i32) -> v8i16 { + unsafe { __lsx_vssrln_h_w(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vssrln_w_d(a: v2i64, b: v2i64) -> v4i32 { - __lsx_vssrln_w_d(a, b) +pub fn lsx_vssrln_w_d(a: v2i64, b: v2i64) -> v4i32 { + unsafe { __lsx_vssrln_w_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vorn_v(a: v16i8, b: v16i8) -> v16i8 { - __lsx_vorn_v(a, b) +pub fn lsx_vorn_v(a: v16i8, b: v16i8) -> v16i8 { + unsafe { __lsx_vorn_v(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(0)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vldi() -> v2i64 { +pub fn lsx_vldi() -> v2i64 { static_assert_simm_bits!(IMM_S13, 13); - __lsx_vldi(IMM_S13) + unsafe { __lsx_vldi(IMM_S13) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vshuf_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8 { - 
__lsx_vshuf_b(a, b, c) +pub fn lsx_vshuf_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8 { + unsafe { __lsx_vshuf_b(a, b, c) } } #[inline] @@ -6460,420 +6460,420 @@ pub unsafe fn lsx_vstx(a: v16i8, mem_addr: *mut i8, b: i64) { #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vextl_qu_du(a: v2u64) -> v2u64 { - __lsx_vextl_qu_du(a) +pub fn lsx_vextl_qu_du(a: v2u64) -> v2u64 { + unsafe { __lsx_vextl_qu_du(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_bnz_b(a: v16u8) -> i32 { - __lsx_bnz_b(a) +pub fn lsx_bnz_b(a: v16u8) -> i32 { + unsafe { __lsx_bnz_b(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_bnz_d(a: v2u64) -> i32 { - __lsx_bnz_d(a) +pub fn lsx_bnz_d(a: v2u64) -> i32 { + unsafe { __lsx_bnz_d(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_bnz_h(a: v8u16) -> i32 { - __lsx_bnz_h(a) +pub fn lsx_bnz_h(a: v8u16) -> i32 { + unsafe { __lsx_bnz_h(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_bnz_v(a: v16u8) -> i32 { - __lsx_bnz_v(a) +pub fn lsx_bnz_v(a: v16u8) -> i32 { + unsafe { __lsx_bnz_v(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_bnz_w(a: v4u32) -> i32 { - __lsx_bnz_w(a) +pub fn lsx_bnz_w(a: v4u32) -> i32 { + unsafe { __lsx_bnz_w(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_bz_b(a: v16u8) -> i32 { - __lsx_bz_b(a) +pub fn lsx_bz_b(a: v16u8) -> i32 { + unsafe { __lsx_bz_b(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_bz_d(a: v2u64) -> i32 { - __lsx_bz_d(a) +pub fn lsx_bz_d(a: v2u64) -> i32 { + unsafe { __lsx_bz_d(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_bz_h(a: v8u16) -> i32 { - __lsx_bz_h(a) +pub fn lsx_bz_h(a: v8u16) -> i32 { + unsafe { __lsx_bz_h(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_bz_v(a: v16u8) -> i32 { - __lsx_bz_v(a) +pub fn lsx_bz_v(a: v16u8) -> i32 { + unsafe { __lsx_bz_v(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_bz_w(a: v4u32) -> i32 { - __lsx_bz_w(a) +pub fn lsx_bz_w(a: v4u32) -> i32 { + unsafe { __lsx_bz_w(a) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_caf_d(a: v2f64, b: v2f64) -> v2i64 { - __lsx_vfcmp_caf_d(a, b) +pub fn lsx_vfcmp_caf_d(a: v2f64, b: v2f64) -> v2i64 { + unsafe { __lsx_vfcmp_caf_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_caf_s(a: v4f32, b: v4f32) -> v4i32 { - __lsx_vfcmp_caf_s(a, b) +pub fn lsx_vfcmp_caf_s(a: v4f32, b: v4f32) -> v4i32 { + unsafe { __lsx_vfcmp_caf_s(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_ceq_d(a: v2f64, b: v2f64) -> v2i64 { - __lsx_vfcmp_ceq_d(a, 
b) +pub fn lsx_vfcmp_ceq_d(a: v2f64, b: v2f64) -> v2i64 { + unsafe { __lsx_vfcmp_ceq_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_ceq_s(a: v4f32, b: v4f32) -> v4i32 { - __lsx_vfcmp_ceq_s(a, b) +pub fn lsx_vfcmp_ceq_s(a: v4f32, b: v4f32) -> v4i32 { + unsafe { __lsx_vfcmp_ceq_s(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_cle_d(a: v2f64, b: v2f64) -> v2i64 { - __lsx_vfcmp_cle_d(a, b) +pub fn lsx_vfcmp_cle_d(a: v2f64, b: v2f64) -> v2i64 { + unsafe { __lsx_vfcmp_cle_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_cle_s(a: v4f32, b: v4f32) -> v4i32 { - __lsx_vfcmp_cle_s(a, b) +pub fn lsx_vfcmp_cle_s(a: v4f32, b: v4f32) -> v4i32 { + unsafe { __lsx_vfcmp_cle_s(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_clt_d(a: v2f64, b: v2f64) -> v2i64 { - __lsx_vfcmp_clt_d(a, b) +pub fn lsx_vfcmp_clt_d(a: v2f64, b: v2f64) -> v2i64 { + unsafe { __lsx_vfcmp_clt_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_clt_s(a: v4f32, b: v4f32) -> v4i32 { - __lsx_vfcmp_clt_s(a, b) +pub fn lsx_vfcmp_clt_s(a: v4f32, b: v4f32) -> v4i32 { + unsafe { __lsx_vfcmp_clt_s(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_cne_d(a: v2f64, b: v2f64) -> v2i64 { - __lsx_vfcmp_cne_d(a, b) +pub fn lsx_vfcmp_cne_d(a: v2f64, b: v2f64) -> v2i64 { + unsafe { __lsx_vfcmp_cne_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_cne_s(a: v4f32, b: v4f32) -> v4i32 { - __lsx_vfcmp_cne_s(a, b) +pub fn lsx_vfcmp_cne_s(a: v4f32, b: v4f32) -> v4i32 { + unsafe { __lsx_vfcmp_cne_s(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_cor_d(a: v2f64, b: v2f64) -> v2i64 { - __lsx_vfcmp_cor_d(a, b) +pub fn lsx_vfcmp_cor_d(a: v2f64, b: v2f64) -> v2i64 { + unsafe { __lsx_vfcmp_cor_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_cor_s(a: v4f32, b: v4f32) -> v4i32 { - __lsx_vfcmp_cor_s(a, b) +pub fn lsx_vfcmp_cor_s(a: v4f32, b: v4f32) -> v4i32 { + unsafe { __lsx_vfcmp_cor_s(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_cueq_d(a: v2f64, b: v2f64) -> v2i64 { - __lsx_vfcmp_cueq_d(a, b) +pub fn lsx_vfcmp_cueq_d(a: v2f64, b: v2f64) -> v2i64 { + unsafe { __lsx_vfcmp_cueq_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_cueq_s(a: v4f32, b: v4f32) -> v4i32 { - __lsx_vfcmp_cueq_s(a, b) +pub fn lsx_vfcmp_cueq_s(a: v4f32, b: v4f32) -> v4i32 { + unsafe { __lsx_vfcmp_cueq_s(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_cule_d(a: v2f64, b: v2f64) -> v2i64 { - __lsx_vfcmp_cule_d(a, b) +pub fn lsx_vfcmp_cule_d(a: v2f64, b: v2f64) -> v2i64 { + unsafe { __lsx_vfcmp_cule_d(a, b) } } 
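// Illustrative sketch (annotation, not part of the patch above): with the LSX
// wrappers now exposed as safe `fn`s that confine `unsafe` to the raw intrinsic
// call, a caller that itself enables the `lsx` target feature can use them
// without writing `unsafe` (under the target_feature 1.1 calling rules).
// The helper below is hypothetical and assumes `v2f64`, `v2i64`, and
// `lsx_vfcmp_ceq_d` from this module are in scope.
#[cfg(target_arch = "loongarch64")]
#[target_feature(enable = "lsx")]
fn lanes_equal_f64(a: v2f64, b: v2f64) -> v2i64 {
    // Safe call: caller and callee both carry `#[target_feature(enable = "lsx")]`.
    lsx_vfcmp_ceq_d(a, b)
}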
#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_cule_s(a: v4f32, b: v4f32) -> v4i32 { - __lsx_vfcmp_cule_s(a, b) +pub fn lsx_vfcmp_cule_s(a: v4f32, b: v4f32) -> v4i32 { + unsafe { __lsx_vfcmp_cule_s(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_cult_d(a: v2f64, b: v2f64) -> v2i64 { - __lsx_vfcmp_cult_d(a, b) +pub fn lsx_vfcmp_cult_d(a: v2f64, b: v2f64) -> v2i64 { + unsafe { __lsx_vfcmp_cult_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_cult_s(a: v4f32, b: v4f32) -> v4i32 { - __lsx_vfcmp_cult_s(a, b) +pub fn lsx_vfcmp_cult_s(a: v4f32, b: v4f32) -> v4i32 { + unsafe { __lsx_vfcmp_cult_s(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_cun_d(a: v2f64, b: v2f64) -> v2i64 { - __lsx_vfcmp_cun_d(a, b) +pub fn lsx_vfcmp_cun_d(a: v2f64, b: v2f64) -> v2i64 { + unsafe { __lsx_vfcmp_cun_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_cune_d(a: v2f64, b: v2f64) -> v2i64 { - __lsx_vfcmp_cune_d(a, b) +pub fn lsx_vfcmp_cune_d(a: v2f64, b: v2f64) -> v2i64 { + unsafe { __lsx_vfcmp_cune_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_cune_s(a: v4f32, b: v4f32) -> v4i32 { - __lsx_vfcmp_cune_s(a, b) +pub fn lsx_vfcmp_cune_s(a: v4f32, b: v4f32) -> v4i32 { + unsafe { __lsx_vfcmp_cune_s(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_cun_s(a: v4f32, b: v4f32) -> v4i32 { - __lsx_vfcmp_cun_s(a, b) +pub fn lsx_vfcmp_cun_s(a: v4f32, b: v4f32) -> v4i32 { + unsafe { __lsx_vfcmp_cun_s(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_saf_d(a: v2f64, b: v2f64) -> v2i64 { - __lsx_vfcmp_saf_d(a, b) +pub fn lsx_vfcmp_saf_d(a: v2f64, b: v2f64) -> v2i64 { + unsafe { __lsx_vfcmp_saf_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_saf_s(a: v4f32, b: v4f32) -> v4i32 { - __lsx_vfcmp_saf_s(a, b) +pub fn lsx_vfcmp_saf_s(a: v4f32, b: v4f32) -> v4i32 { + unsafe { __lsx_vfcmp_saf_s(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_seq_d(a: v2f64, b: v2f64) -> v2i64 { - __lsx_vfcmp_seq_d(a, b) +pub fn lsx_vfcmp_seq_d(a: v2f64, b: v2f64) -> v2i64 { + unsafe { __lsx_vfcmp_seq_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_seq_s(a: v4f32, b: v4f32) -> v4i32 { - __lsx_vfcmp_seq_s(a, b) +pub fn lsx_vfcmp_seq_s(a: v4f32, b: v4f32) -> v4i32 { + unsafe { __lsx_vfcmp_seq_s(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_sle_d(a: v2f64, b: v2f64) -> v2i64 { - __lsx_vfcmp_sle_d(a, b) +pub fn lsx_vfcmp_sle_d(a: v2f64, b: v2f64) -> v2i64 { + unsafe { __lsx_vfcmp_sle_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", 
issue = "117427")] -pub unsafe fn lsx_vfcmp_sle_s(a: v4f32, b: v4f32) -> v4i32 { - __lsx_vfcmp_sle_s(a, b) +pub fn lsx_vfcmp_sle_s(a: v4f32, b: v4f32) -> v4i32 { + unsafe { __lsx_vfcmp_sle_s(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_slt_d(a: v2f64, b: v2f64) -> v2i64 { - __lsx_vfcmp_slt_d(a, b) +pub fn lsx_vfcmp_slt_d(a: v2f64, b: v2f64) -> v2i64 { + unsafe { __lsx_vfcmp_slt_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_slt_s(a: v4f32, b: v4f32) -> v4i32 { - __lsx_vfcmp_slt_s(a, b) +pub fn lsx_vfcmp_slt_s(a: v4f32, b: v4f32) -> v4i32 { + unsafe { __lsx_vfcmp_slt_s(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_sne_d(a: v2f64, b: v2f64) -> v2i64 { - __lsx_vfcmp_sne_d(a, b) +pub fn lsx_vfcmp_sne_d(a: v2f64, b: v2f64) -> v2i64 { + unsafe { __lsx_vfcmp_sne_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_sne_s(a: v4f32, b: v4f32) -> v4i32 { - __lsx_vfcmp_sne_s(a, b) +pub fn lsx_vfcmp_sne_s(a: v4f32, b: v4f32) -> v4i32 { + unsafe { __lsx_vfcmp_sne_s(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_sor_d(a: v2f64, b: v2f64) -> v2i64 { - __lsx_vfcmp_sor_d(a, b) +pub fn lsx_vfcmp_sor_d(a: v2f64, b: v2f64) -> v2i64 { + unsafe { __lsx_vfcmp_sor_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_sor_s(a: v4f32, b: v4f32) -> v4i32 { - __lsx_vfcmp_sor_s(a, b) +pub fn lsx_vfcmp_sor_s(a: v4f32, b: v4f32) -> v4i32 { + unsafe { __lsx_vfcmp_sor_s(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_sueq_d(a: v2f64, b: v2f64) -> v2i64 { - __lsx_vfcmp_sueq_d(a, b) +pub fn lsx_vfcmp_sueq_d(a: v2f64, b: v2f64) -> v2i64 { + unsafe { __lsx_vfcmp_sueq_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_sueq_s(a: v4f32, b: v4f32) -> v4i32 { - __lsx_vfcmp_sueq_s(a, b) +pub fn lsx_vfcmp_sueq_s(a: v4f32, b: v4f32) -> v4i32 { + unsafe { __lsx_vfcmp_sueq_s(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_sule_d(a: v2f64, b: v2f64) -> v2i64 { - __lsx_vfcmp_sule_d(a, b) +pub fn lsx_vfcmp_sule_d(a: v2f64, b: v2f64) -> v2i64 { + unsafe { __lsx_vfcmp_sule_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_sule_s(a: v4f32, b: v4f32) -> v4i32 { - __lsx_vfcmp_sule_s(a, b) +pub fn lsx_vfcmp_sule_s(a: v4f32, b: v4f32) -> v4i32 { + unsafe { __lsx_vfcmp_sule_s(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_sult_d(a: v2f64, b: v2f64) -> v2i64 { - __lsx_vfcmp_sult_d(a, b) +pub fn lsx_vfcmp_sult_d(a: v2f64, b: v2f64) -> v2i64 { + unsafe { __lsx_vfcmp_sult_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_sult_s(a: v4f32, b: v4f32) -> v4i32 { - 
__lsx_vfcmp_sult_s(a, b) +pub fn lsx_vfcmp_sult_s(a: v4f32, b: v4f32) -> v4i32 { + unsafe { __lsx_vfcmp_sult_s(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_sun_d(a: v2f64, b: v2f64) -> v2i64 { - __lsx_vfcmp_sun_d(a, b) +pub fn lsx_vfcmp_sun_d(a: v2f64, b: v2f64) -> v2i64 { + unsafe { __lsx_vfcmp_sun_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_sune_d(a: v2f64, b: v2f64) -> v2i64 { - __lsx_vfcmp_sune_d(a, b) +pub fn lsx_vfcmp_sune_d(a: v2f64, b: v2f64) -> v2i64 { + unsafe { __lsx_vfcmp_sune_d(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_sune_s(a: v4f32, b: v4f32) -> v4i32 { - __lsx_vfcmp_sune_s(a, b) +pub fn lsx_vfcmp_sune_s(a: v4f32, b: v4f32) -> v4i32 { + unsafe { __lsx_vfcmp_sune_s(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vfcmp_sun_s(a: v4f32, b: v4f32) -> v4i32 { - __lsx_vfcmp_sun_s(a, b) +pub fn lsx_vfcmp_sun_s(a: v4f32, b: v4f32) -> v4i32 { + unsafe { __lsx_vfcmp_sun_s(a, b) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(0)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vrepli_b() -> v16i8 { +pub fn lsx_vrepli_b() -> v16i8 { static_assert_simm_bits!(IMM_S10, 10); - __lsx_vrepli_b(IMM_S10) + unsafe { __lsx_vrepli_b(IMM_S10) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(0)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vrepli_d() -> v2i64 { +pub fn lsx_vrepli_d() -> v2i64 { static_assert_simm_bits!(IMM_S10, 10); - __lsx_vrepli_d(IMM_S10) + unsafe { __lsx_vrepli_d(IMM_S10) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(0)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vrepli_h() -> v8i16 { +pub fn lsx_vrepli_h() -> v8i16 { static_assert_simm_bits!(IMM_S10, 10); - __lsx_vrepli_h(IMM_S10) + unsafe { __lsx_vrepli_h(IMM_S10) } } #[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(0)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lsx_vrepli_w() -> v4i32 { +pub fn lsx_vrepli_w() -> v4i32 { static_assert_simm_bits!(IMM_S10, 10); - __lsx_vrepli_w(IMM_S10) + unsafe { __lsx_vrepli_w(IMM_S10) } } diff --git a/library/stdarch/crates/core_arch/src/loongarch64/mod.rs b/library/stdarch/crates/core_arch/src/loongarch64/mod.rs index b1704bbb48d4f..e8249805eae26 100644 --- a/library/stdarch/crates/core_arch/src/loongarch64/mod.rs +++ b/library/stdarch/crates/core_arch/src/loongarch64/mod.rs @@ -1,4 +1,4 @@ -//! `LoongArch` intrinsics +//! 
`LoongArch64` intrinsics mod lasx; mod lsx; @@ -13,89 +13,30 @@ use crate::arch::asm; /// Reads the 64-bit stable counter value and the counter ID #[inline] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn rdtime_d() -> (i64, isize) { - let val: i64; - let tid: isize; - asm!("rdtime.d {}, {}", out(reg) val, out(reg) tid, options(readonly, nostack)); - (val, tid) -} - -/// Reads the lower 32-bit stable counter value and the counter ID -#[inline] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn rdtimel_w() -> (i32, isize) { - let val: i32; - let tid: isize; - asm!("rdtimel.w {}, {}", out(reg) val, out(reg) tid, options(readonly, nostack)); - (val, tid) -} - -/// Reads the upper 32-bit stable counter value and the counter ID -#[inline] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn rdtimeh_w() -> (i32, isize) { - let val: i32; - let tid: isize; - asm!("rdtimeh.w {}, {}", out(reg) val, out(reg) tid, options(readonly, nostack)); +pub fn rdtime_d() -> (i64, isize) { + let (val, tid): (i64, isize); + unsafe { asm!("rdtime.d {}, {}", out(reg) val, out(reg) tid, options(readonly, nostack)) }; (val, tid) } #[allow(improper_ctypes)] unsafe extern "unadjusted" { - #[link_name = "llvm.loongarch.crc.w.b.w"] - fn __crc_w_b_w(a: i32, b: i32) -> i32; - #[link_name = "llvm.loongarch.crc.w.h.w"] - fn __crc_w_h_w(a: i32, b: i32) -> i32; - #[link_name = "llvm.loongarch.crc.w.w.w"] - fn __crc_w_w_w(a: i32, b: i32) -> i32; #[link_name = "llvm.loongarch.crc.w.d.w"] fn __crc_w_d_w(a: i64, b: i32) -> i32; - #[link_name = "llvm.loongarch.crcc.w.b.w"] - fn __crcc_w_b_w(a: i32, b: i32) -> i32; - #[link_name = "llvm.loongarch.crcc.w.h.w"] - fn __crcc_w_h_w(a: i32, b: i32) -> i32; - #[link_name = "llvm.loongarch.crcc.w.w.w"] - fn __crcc_w_w_w(a: i32, b: i32) -> i32; #[link_name = "llvm.loongarch.crcc.w.d.w"] fn __crcc_w_d_w(a: i64, b: i32) -> i32; #[link_name = "llvm.loongarch.cacop.d"] fn __cacop(a: i64, b: i64, c: i64); - #[link_name = "llvm.loongarch.dbar"] - fn __dbar(a: i32); - #[link_name = "llvm.loongarch.ibar"] - fn __ibar(a: i32); - #[link_name = "llvm.loongarch.movgr2fcsr"] - fn __movgr2fcsr(a: i32, b: i32); - #[link_name = "llvm.loongarch.movfcsr2gr"] - fn __movfcsr2gr(a: i32) -> i32; #[link_name = "llvm.loongarch.csrrd.d"] fn __csrrd(a: i32) -> i64; #[link_name = "llvm.loongarch.csrwr.d"] fn __csrwr(a: i64, b: i32) -> i64; #[link_name = "llvm.loongarch.csrxchg.d"] fn __csrxchg(a: i64, b: i64, c: i32) -> i64; - #[link_name = "llvm.loongarch.iocsrrd.b"] - fn __iocsrrd_b(a: i32) -> i32; - #[link_name = "llvm.loongarch.iocsrrd.h"] - fn __iocsrrd_h(a: i32) -> i32; - #[link_name = "llvm.loongarch.iocsrrd.w"] - fn __iocsrrd_w(a: i32) -> i32; #[link_name = "llvm.loongarch.iocsrrd.d"] fn __iocsrrd_d(a: i32) -> i64; - #[link_name = "llvm.loongarch.iocsrwr.b"] - fn __iocsrwr_b(a: i32, b: i32); - #[link_name = "llvm.loongarch.iocsrwr.h"] - fn __iocsrwr_h(a: i32, b: i32); - #[link_name = "llvm.loongarch.iocsrwr.w"] - fn __iocsrwr_w(a: i32, b: i32); #[link_name = "llvm.loongarch.iocsrwr.d"] fn __iocsrwr_d(a: i64, b: i32); - #[link_name = "llvm.loongarch.break"] - fn __break(a: i32); - #[link_name = "llvm.loongarch.cpucfg"] - fn __cpucfg(a: i32) -> i32; - #[link_name = "llvm.loongarch.syscall"] - fn __syscall(a: i32); #[link_name = "llvm.loongarch.asrtle.d"] fn __asrtle(a: i64, b: i64); #[link_name = "llvm.loongarch.asrtgt.d"] @@ -104,70 +45,20 @@ unsafe extern "unadjusted" { fn __lddir(a: i64, b: i64) -> i64; #[link_name = 
"llvm.loongarch.ldpte.d"] fn __ldpte(a: i64, b: i64); - #[link_name = "llvm.loongarch.frecipe.s"] - fn __frecipe_s(a: f32) -> f32; - #[link_name = "llvm.loongarch.frecipe.d"] - fn __frecipe_d(a: f64) -> f64; - #[link_name = "llvm.loongarch.frsqrte.s"] - fn __frsqrte_s(a: f32) -> f32; - #[link_name = "llvm.loongarch.frsqrte.d"] - fn __frsqrte_d(a: f64) -> f64; } /// Calculate the CRC value using the IEEE 802.3 polynomial (0xEDB88320) #[inline] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn crc_w_b_w(a: i32, b: i32) -> i32 { - __crc_w_b_w(a, b) -} - -/// Calculate the CRC value using the IEEE 802.3 polynomial (0xEDB88320) -#[inline] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn crc_w_h_w(a: i32, b: i32) -> i32 { - __crc_w_h_w(a, b) -} - -/// Calculate the CRC value using the IEEE 802.3 polynomial (0xEDB88320) -#[inline] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn crc_w_w_w(a: i32, b: i32) -> i32 { - __crc_w_w_w(a, b) -} - -/// Calculate the CRC value using the IEEE 802.3 polynomial (0xEDB88320) -#[inline] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn crc_w_d_w(a: i64, b: i32) -> i32 { - __crc_w_d_w(a, b) -} - -/// Calculate the CRC value using the Castagnoli polynomial (0x82F63B78) -#[inline] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn crcc_w_b_w(a: i32, b: i32) -> i32 { - __crcc_w_b_w(a, b) -} - -/// Calculate the CRC value using the Castagnoli polynomial (0x82F63B78) -#[inline] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn crcc_w_h_w(a: i32, b: i32) -> i32 { - __crcc_w_h_w(a, b) +pub fn crc_w_d_w(a: i64, b: i32) -> i32 { + unsafe { __crc_w_d_w(a, b) } } /// Calculate the CRC value using the Castagnoli polynomial (0x82F63B78) #[inline] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn crcc_w_w_w(a: i32, b: i32) -> i32 { - __crcc_w_w_w(a, b) -} - -/// Calculate the CRC value using the Castagnoli polynomial (0x82F63B78) -#[inline] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn crcc_w_d_w(a: i64, b: i32) -> i32 { - __crcc_w_d_w(a, b) +pub fn crcc_w_d_w(a: i64, b: i32) -> i32 { + unsafe { __crcc_w_d_w(a, b) } } /// Generates the cache operation instruction @@ -178,38 +69,6 @@ pub unsafe fn cacop(a: i64, b: i64) { __cacop(a, b, IMM12); } -/// Generates the memory barrier instruction -#[inline] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn dbar() { - static_assert_uimm_bits!(IMM15, 15); - __dbar(IMM15); -} - -/// Generates the instruction-fetch barrier instruction -#[inline] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn ibar() { - static_assert_uimm_bits!(IMM15, 15); - __ibar(IMM15); -} - -/// Moves data from a GPR to the FCSR -#[inline] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn movgr2fcsr(a: i32) { - static_assert_uimm_bits!(IMM5, 5); - __movgr2fcsr(IMM5, a); -} - -/// Moves data from a FCSR to the GPR -#[inline] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn movfcsr2gr() -> i32 { - static_assert_uimm_bits!(IMM5, 5); - __movfcsr2gr(IMM5) -} - /// Reads the CSR #[inline] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -234,27 +93,6 @@ pub unsafe fn csrxchg(a: i64, b: i64) -> i64 { __csrxchg(a, b, IMM14) } -/// Reads the 8-bit IO-CSR -#[inline] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe 
fn iocsrrd_b(a: i32) -> i32 { - __iocsrrd_b(a) -} - -/// Reads the 16-bit IO-CSR -#[inline] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn iocsrrd_h(a: i32) -> i32 { - __iocsrrd_h(a) -} - -/// Reads the 32-bit IO-CSR -#[inline] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn iocsrrd_w(a: i32) -> i32 { - __iocsrrd_w(a) -} - /// Reads the 64-bit IO-CSR #[inline] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -262,27 +100,6 @@ pub unsafe fn iocsrrd_d(a: i32) -> i64 { __iocsrrd_d(a) } -/// Writes the 8-bit IO-CSR -#[inline] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn iocsrwr_b(a: i32, b: i32) { - __iocsrwr_b(a, b) -} - -/// Writes the 16-bit IO-CSR -#[inline] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn iocsrwr_h(a: i32, b: i32) { - __iocsrwr_h(a, b) -} - -/// Writes the 32-bit IO-CSR -#[inline] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn iocsrwr_w(a: i32, b: i32) { - __iocsrwr_w(a, b) -} - /// Writes the 64-bit IO-CSR #[inline] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -290,29 +107,6 @@ pub unsafe fn iocsrwr_d(a: i64, b: i32) { __iocsrwr_d(a, b) } -/// Generates the breakpoint instruction -#[inline] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn brk() { - static_assert_uimm_bits!(IMM15, 15); - __break(IMM15); -} - -/// Reads the CPU configuration register -#[inline] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn cpucfg(a: i32) -> i32 { - __cpucfg(a) -} - -/// Generates the syscall instruction -#[inline] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn syscall() { - static_assert_uimm_bits!(IMM15, 15); - __syscall(IMM15); -} - /// Generates the less-than-or-equal asseration instruction #[inline] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -342,35 +136,3 @@ pub unsafe fn lddir(a: i64) -> i64 { pub unsafe fn ldpte(a: i64) { __ldpte(a, B) } - -/// Calculate the approximate single-precision result of 1.0 divided -#[inline] -#[target_feature(enable = "frecipe")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn frecipe_s(a: f32) -> f32 { - __frecipe_s(a) -} - -/// Calculate the approximate double-precision result of 1.0 divided -#[inline] -#[target_feature(enable = "frecipe")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn frecipe_d(a: f64) -> f64 { - __frecipe_d(a) -} - -/// Calculate the approximate single-precision result of dividing 1.0 by the square root -#[inline] -#[target_feature(enable = "frecipe")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn frsqrte_s(a: f32) -> f32 { - __frsqrte_s(a) -} - -/// Calculate the approximate double-precision result of dividing 1.0 by the square root -#[inline] -#[target_feature(enable = "frecipe")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn frsqrte_d(a: f64) -> f64 { - __frsqrte_d(a) -} diff --git a/library/stdarch/crates/core_arch/src/loongarch_shared/mod.rs b/library/stdarch/crates/core_arch/src/loongarch_shared/mod.rs new file mode 100644 index 0000000000000..710b926f8df4f --- /dev/null +++ b/library/stdarch/crates/core_arch/src/loongarch_shared/mod.rs @@ -0,0 +1,242 @@ +//! 
`Shared LoongArch` intrinsics + +use crate::arch::asm; + +/// Reads the lower 32-bit stable counter value and the counter ID +#[inline] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn rdtimel_w() -> (i32, isize) { + let (val, tid): (i32, isize); + unsafe { asm!("rdtimel.w {}, {}", out(reg) val, out(reg) tid, options(readonly, nostack)) }; + (val, tid) +} + +/// Reads the upper 32-bit stable counter value and the counter ID +#[inline] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn rdtimeh_w() -> (i32, isize) { + let (val, tid): (i32, isize); + unsafe { asm!("rdtimeh.w {}, {}", out(reg) val, out(reg) tid, options(readonly, nostack)) }; + (val, tid) +} + +#[allow(improper_ctypes)] +unsafe extern "unadjusted" { + #[link_name = "llvm.loongarch.crc.w.b.w"] + fn __crc_w_b_w(a: i32, b: i32) -> i32; + #[link_name = "llvm.loongarch.crc.w.h.w"] + fn __crc_w_h_w(a: i32, b: i32) -> i32; + #[link_name = "llvm.loongarch.crc.w.w.w"] + fn __crc_w_w_w(a: i32, b: i32) -> i32; + #[link_name = "llvm.loongarch.crcc.w.b.w"] + fn __crcc_w_b_w(a: i32, b: i32) -> i32; + #[link_name = "llvm.loongarch.crcc.w.h.w"] + fn __crcc_w_h_w(a: i32, b: i32) -> i32; + #[link_name = "llvm.loongarch.crcc.w.w.w"] + fn __crcc_w_w_w(a: i32, b: i32) -> i32; + #[link_name = "llvm.loongarch.dbar"] + fn __dbar(a: i32); + #[link_name = "llvm.loongarch.ibar"] + fn __ibar(a: i32); + #[link_name = "llvm.loongarch.movgr2fcsr"] + fn __movgr2fcsr(a: i32, b: i32); + #[link_name = "llvm.loongarch.movfcsr2gr"] + fn __movfcsr2gr(a: i32) -> i32; + #[link_name = "llvm.loongarch.iocsrrd.b"] + fn __iocsrrd_b(a: i32) -> i32; + #[link_name = "llvm.loongarch.iocsrrd.h"] + fn __iocsrrd_h(a: i32) -> i32; + #[link_name = "llvm.loongarch.iocsrrd.w"] + fn __iocsrrd_w(a: i32) -> i32; + #[link_name = "llvm.loongarch.iocsrwr.b"] + fn __iocsrwr_b(a: i32, b: i32); + #[link_name = "llvm.loongarch.iocsrwr.h"] + fn __iocsrwr_h(a: i32, b: i32); + #[link_name = "llvm.loongarch.iocsrwr.w"] + fn __iocsrwr_w(a: i32, b: i32); + #[link_name = "llvm.loongarch.break"] + fn __break(a: i32); + #[link_name = "llvm.loongarch.cpucfg"] + fn __cpucfg(a: i32) -> i32; + #[link_name = "llvm.loongarch.syscall"] + fn __syscall(a: i32); + #[link_name = "llvm.loongarch.frecipe.s"] + fn __frecipe_s(a: f32) -> f32; + #[link_name = "llvm.loongarch.frecipe.d"] + fn __frecipe_d(a: f64) -> f64; + #[link_name = "llvm.loongarch.frsqrte.s"] + fn __frsqrte_s(a: f32) -> f32; + #[link_name = "llvm.loongarch.frsqrte.d"] + fn __frsqrte_d(a: f64) -> f64; +} + +/// Calculate the CRC value using the IEEE 802.3 polynomial (0xEDB88320) +#[inline] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn crc_w_b_w(a: i32, b: i32) -> i32 { + unsafe { __crc_w_b_w(a, b) } +} + +/// Calculate the CRC value using the IEEE 802.3 polynomial (0xEDB88320) +#[inline] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn crc_w_h_w(a: i32, b: i32) -> i32 { + unsafe { __crc_w_h_w(a, b) } +} + +/// Calculate the CRC value using the IEEE 802.3 polynomial (0xEDB88320) +#[inline] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn crc_w_w_w(a: i32, b: i32) -> i32 { + unsafe { __crc_w_w_w(a, b) } +} + +/// Calculate the CRC value using the Castagnoli polynomial (0x82F63B78) +#[inline] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn crcc_w_b_w(a: i32, b: i32) -> i32 { + unsafe { __crcc_w_b_w(a, b) } +} + +/// Calculate the CRC value using the Castagnoli polynomial (0x82F63B78) +#[inline] +#[unstable(feature = 
"stdarch_loongarch", issue = "117427")] +pub fn crcc_w_h_w(a: i32, b: i32) -> i32 { + unsafe { __crcc_w_h_w(a, b) } +} + +/// Calculate the CRC value using the Castagnoli polynomial (0x82F63B78) +#[inline] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn crcc_w_w_w(a: i32, b: i32) -> i32 { + unsafe { __crcc_w_w_w(a, b) } +} + +/// Generates the memory barrier instruction +#[inline] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn dbar() { + static_assert_uimm_bits!(IMM15, 15); + unsafe { __dbar(IMM15) }; +} + +/// Generates the instruction-fetch barrier instruction +#[inline] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn ibar() { + static_assert_uimm_bits!(IMM15, 15); + unsafe { __ibar(IMM15) }; +} + +/// Moves data from a GPR to the FCSR +#[inline] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn movgr2fcsr(a: i32) { + static_assert_uimm_bits!(IMM5, 5); + __movgr2fcsr(IMM5, a); +} + +/// Moves data from a FCSR to the GPR +#[inline] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn movfcsr2gr() -> i32 { + static_assert_uimm_bits!(IMM5, 5); + unsafe { __movfcsr2gr(IMM5) } +} + +/// Reads the 8-bit IO-CSR +#[inline] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn iocsrrd_b(a: i32) -> i32 { + __iocsrrd_b(a) +} + +/// Reads the 16-bit IO-CSR +#[inline] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn iocsrrd_h(a: i32) -> i32 { + __iocsrrd_h(a) +} + +/// Reads the 32-bit IO-CSR +#[inline] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn iocsrrd_w(a: i32) -> i32 { + __iocsrrd_w(a) +} + +/// Writes the 8-bit IO-CSR +#[inline] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn iocsrwr_b(a: i32, b: i32) { + __iocsrwr_b(a, b) +} + +/// Writes the 16-bit IO-CSR +#[inline] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn iocsrwr_h(a: i32, b: i32) { + __iocsrwr_h(a, b) +} + +/// Writes the 32-bit IO-CSR +#[inline] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn iocsrwr_w(a: i32, b: i32) { + __iocsrwr_w(a, b) +} + +/// Generates the breakpoint instruction +#[inline] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn brk() { + static_assert_uimm_bits!(IMM15, 15); + __break(IMM15); +} + +/// Reads the CPU configuration register +#[inline] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn cpucfg(a: i32) -> i32 { + unsafe { __cpucfg(a) } +} + +/// Generates the syscall instruction +#[inline] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub unsafe fn syscall() { + static_assert_uimm_bits!(IMM15, 15); + __syscall(IMM15); +} + +/// Calculate the approximate single-precision result of 1.0 divided +#[inline] +#[target_feature(enable = "frecipe")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn frecipe_s(a: f32) -> f32 { + unsafe { __frecipe_s(a) } +} + +/// Calculate the approximate double-precision result of 1.0 divided +#[inline] +#[target_feature(enable = "frecipe")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn frecipe_d(a: f64) -> f64 { + unsafe { __frecipe_d(a) } +} + +/// Calculate the approximate single-precision result of dividing 1.0 by the square root +#[inline] +#[target_feature(enable = "frecipe")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn frsqrte_s(a: f32) -> f32 { + unsafe { 
__frsqrte_s(a) } +} + +/// Calculate the approximate double-precision result of dividing 1.0 by the square root +#[inline] +#[target_feature(enable = "frecipe")] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub fn frsqrte_d(a: f64) -> f64 { + unsafe { __frsqrte_d(a) } +} diff --git a/library/stdarch/crates/core_arch/src/mod.rs b/library/stdarch/crates/core_arch/src/mod.rs index f6e959efd47cb..2105cca1b4389 100644 --- a/library/stdarch/crates/core_arch/src/mod.rs +++ b/library/stdarch/crates/core_arch/src/mod.rs @@ -16,6 +16,9 @@ mod riscv_shared; ))] mod arm_shared; +#[cfg(any(target_arch = "loongarch32", target_arch = "loongarch64", doc))] +mod loongarch_shared; + mod simd; #[doc = include_str!("core_arch_docs.md")] @@ -271,13 +274,25 @@ pub mod arch { pub use crate::core_arch::nvptx::*; } - /// Platform-specific intrinsics for the `loongarch` platform. + /// Platform-specific intrinsics for the `loongarch32` platform. + /// + /// See the [module documentation](../index.html) for more details. + #[cfg(any(target_arch = "loongarch32", doc))] + #[doc(cfg(target_arch = "loongarch32"))] + #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub mod loongarch32 { + pub use crate::core_arch::loongarch_shared::*; + pub use crate::core_arch::loongarch32::*; + } + + /// Platform-specific intrinsics for the `loongarch64` platform. /// /// See the [module documentation](../index.html) for more details. #[cfg(any(target_arch = "loongarch64", doc))] #[doc(cfg(target_arch = "loongarch64"))] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub mod loongarch64 { + pub use crate::core_arch::loongarch_shared::*; pub use crate::core_arch::loongarch64::*; } @@ -334,6 +349,10 @@ mod powerpc64; #[doc(cfg(target_arch = "nvptx64"))] mod nvptx; +#[cfg(any(target_arch = "loongarch32", doc))] +#[doc(cfg(target_arch = "loongarch32"))] +mod loongarch32; + #[cfg(any(target_arch = "loongarch64", doc))] #[doc(cfg(target_arch = "loongarch64"))] mod loongarch64; diff --git a/library/stdarch/crates/core_arch/src/s390x/vector.rs b/library/stdarch/crates/core_arch/src/s390x/vector.rs index 1cd33c3554bde..a09a27a029c1e 100644 --- a/library/stdarch/crates/core_arch/src/s390x/vector.rs +++ b/library/stdarch/crates/core_arch/src/s390x/vector.rs @@ -1181,6 +1181,20 @@ mod sealed { impl_vec_trait! { [VectorOrc vec_orc]+ 2c (orc) } + // Z vector intrinsic C23 math.h LLVM IR ISO/IEC 60559 operation inexact vfidb parameters + // + // vec_rint rint llvm.rint roundToIntegralExact yes 0, 0 + // vec_roundc nearbyint llvm.nearbyint n/a no 4, 0 + // vec_floor / vec_roundm floor llvm.floor roundToIntegralTowardNegative no 4, 7 + // vec_ceil / vec_roundp ceil llvm.ceil roundToIntegralTowardPositive no 4, 6 + // vec_trunc / vec_roundz trunc llvm.trunc roundToIntegralTowardZero no 4, 5 + // vec_round roundeven llvm.roundeven roundToIntegralTiesToEven no 4, 4 + // n/a round llvm.round roundToIntegralTiesAway no 4, 1 + + // `simd_round_ties_even` is implemented as `llvm.rint`. + test_impl! { vec_rint_f32 (a: vector_float) -> vector_float [simd_round_ties_even, "vector-enhancements-1" vfisb] } + test_impl! { vec_rint_f64 (a: vector_double) -> vector_double [simd_round_ties_even, vfidb] } + test_impl! { vec_roundc_f32 (a: vector_float) -> vector_float [nearbyint_v4f32, "vector-enhancements-1" vfisb] } test_impl! { vec_roundc_f64 (a: vector_double) -> vector_double [nearbyint_v2f64, vfidb] } @@ -1189,9 +1203,6 @@ mod sealed { test_impl! 
{ vec_round_f32 (a: vector_float) -> vector_float [roundeven_v4f32, _] } test_impl! { vec_round_f64 (a: vector_double) -> vector_double [roundeven_v2f64, _] } - test_impl! { vec_rint_f32 (a: vector_float) -> vector_float [simd_round_ties_even, "vector-enhancements-1" vfisb] } - test_impl! { vec_rint_f64 (a: vector_double) -> vector_double [simd_round_ties_even, vfidb] } - #[unstable(feature = "stdarch_s390x", issue = "135681")] pub trait VectorRoundc { unsafe fn vec_roundc(self) -> Self; @@ -2254,14 +2265,14 @@ mod sealed { #[inline] #[target_feature(enable = "vector")] - #[cfg_attr(test, assert_instr("vlbb"))] + #[cfg_attr(test, assert_instr(vlbb))] unsafe fn test_vec_load_bndry(ptr: *const i32) -> MaybeUninit { vector_signed_int::vec_load_bndry::<512>(ptr) } #[inline] #[target_feature(enable = "vector")] - #[cfg_attr(test, assert_instr(vst))] + #[cfg_attr(test, assert_instr(vstl))] unsafe fn test_vec_store_len(vector: vector_signed_int, ptr: *mut i32, byte_count: u32) { vector.vec_store_len(ptr, byte_count) } @@ -2787,11 +2798,11 @@ mod sealed { } test_impl! { vec_vmal_ib(a: vector_signed_char, b: vector_signed_char, c: vector_signed_char) -> vector_signed_char [simd_mladd, vmalb ] } - test_impl! { vec_vmal_ih(a: vector_signed_short, b: vector_signed_short, c: vector_signed_short) -> vector_signed_short[simd_mladd, vmalh ] } + test_impl! { vec_vmal_ih(a: vector_signed_short, b: vector_signed_short, c: vector_signed_short) -> vector_signed_short[simd_mladd, vmalhw ] } test_impl! { vec_vmal_if(a: vector_signed_int, b: vector_signed_int, c: vector_signed_int) -> vector_signed_int [simd_mladd, vmalf ] } test_impl! { vec_vmal_ub(a: vector_unsigned_char, b: vector_unsigned_char, c: vector_unsigned_char) -> vector_unsigned_char [simd_mladd, vmalb ] } - test_impl! { vec_vmal_uh(a: vector_unsigned_short, b: vector_unsigned_short, c: vector_unsigned_short) -> vector_unsigned_short[simd_mladd, vmalh ] } + test_impl! { vec_vmal_uh(a: vector_unsigned_short, b: vector_unsigned_short, c: vector_unsigned_short) -> vector_unsigned_short[simd_mladd, vmalhw ] } test_impl! { vec_vmal_uf(a: vector_unsigned_int, b: vector_unsigned_int, c: vector_unsigned_int) -> vector_unsigned_int [simd_mladd, vmalf ] } impl_mul!([VectorMladd vec_mladd] vec_vmal_ib (vector_signed_char, vector_signed_char, vector_signed_char) -> vector_signed_char ); diff --git a/library/stdarch/crates/core_arch/src/wasm32/mod.rs b/library/stdarch/crates/core_arch/src/wasm32/mod.rs index 2c4361f1639f7..60049c73295c1 100644 --- a/library/stdarch/crates/core_arch/src/wasm32/mod.rs +++ b/library/stdarch/crates/core_arch/src/wasm32/mod.rs @@ -191,6 +191,16 @@ unsafe extern "C-unwind" { // #[cfg_attr(test, assert_instr(throw, TAG = 0, ptr = core::ptr::null_mut()))] #[inline] #[unstable(feature = "wasm_exception_handling_intrinsics", issue = "122465")] +// FIXME: Since this instruction unwinds, `core` built with `-C panic=unwind` +// cannot be linked with `-C panic=abort` programs. But that's not +// entirely supported anyway, because runtimes without EH support won't +// be able to handle `try` blocks in `-C panic=unwind` crates either. +// We ship `-C panic=abort` `core`, so this doesn't affect users +// directly. Resolving this will likely require patching out both `try` +// and `throw` instructions, at which point we can look into whitelisting +// this function in the compiler to allow linking. +// See https://github.com/rust-lang/rust/issues/118168. +#[allow(ffi_unwind_calls)] pub unsafe fn throw(ptr: *mut u8) -> ! 
{ static_assert!(TAG == 0); // LLVM only supports tag 0 == C++ right now. wasm_throw(TAG, ptr) diff --git a/library/stdarch/crates/intrinsic-test/src/arm/compile.rs b/library/stdarch/crates/intrinsic-test/src/arm/compile.rs index 8276cd87c1cbc..48a8ed950e3d8 100644 --- a/library/stdarch/crates/intrinsic-test/src/arm/compile.rs +++ b/library/stdarch/crates/intrinsic-test/src/arm/compile.rs @@ -1,64 +1,51 @@ -use crate::common::compile_c::CompilationCommandBuilder; -use crate::common::gen_c::compile_c_programs; +use crate::common::cli::ProcessedCli; +use crate::common::compile_c::{CompilationCommandBuilder, CppCompilation}; + +pub fn build_cpp_compilation(config: &ProcessedCli) -> Option { + let cpp_compiler = config.cpp_compiler.as_ref()?; -pub fn compile_c_arm( - intrinsics_name_list: &[String], - compiler: &str, - target: &str, - cxx_toolchain_dir: Option<&str>, -) -> bool { // -ffp-contract=off emulates Rust's approach of not fusing separate mul-add operations let mut command = CompilationCommandBuilder::new() .add_arch_flags(vec!["armv8.6-a", "crypto", "crc", "dotprod", "fp16"]) - .set_compiler(compiler) - .set_target(target) + .set_compiler(cpp_compiler) + .set_target(&config.target) .set_opt_level("2") - .set_cxx_toolchain_dir(cxx_toolchain_dir) + .set_cxx_toolchain_dir(config.cxx_toolchain_dir.as_deref()) .set_project_root("c_programs") .add_extra_flags(vec!["-ffp-contract=off", "-Wno-narrowing"]); - if !target.contains("v7") { + if !config.target.contains("v7") { command = command.add_arch_flags(vec!["faminmax", "lut", "sha3"]); } - /* - * clang++ cannot link an aarch64_be object file, so we invoke - * aarch64_be-unknown-linux-gnu's C++ linker. This ensures that we - * are testing the intrinsics against LLVM. - * - * Note: setting `--sysroot=<...>` which is the obvious thing to do - * does not work as it gets caught up with `#include_next ` - * not existing... 
- */ - if target.contains("aarch64_be") { - command = command - .set_linker( - cxx_toolchain_dir.unwrap_or("").to_string() + "/bin/aarch64_be-none-linux-gnu-g++", - ) - .set_include_paths(vec![ - "/include", - "/aarch64_be-none-linux-gnu/include", - "/aarch64_be-none-linux-gnu/include/c++/14.2.1", - "/aarch64_be-none-linux-gnu/include/c++/14.2.1/aarch64_be-none-linux-gnu", - "/aarch64_be-none-linux-gnu/include/c++/14.2.1/backward", - "/aarch64_be-none-linux-gnu/libc/usr/include", - ]); - } - - if !compiler.contains("clang") { + if !cpp_compiler.contains("clang") { command = command.add_extra_flag("-flax-vector-conversions"); } - let compiler_commands = intrinsics_name_list - .iter() - .map(|intrinsic_name| { - command - .clone() - .set_input_name(intrinsic_name) - .set_output_name(intrinsic_name) - .make_string() - }) - .collect::>(); + let mut cpp_compiler = command.into_cpp_compilation(); + + if config.target.contains("aarch64_be") { + let Some(ref cxx_toolchain_dir) = config.cxx_toolchain_dir else { + panic!( + "target `{}` must specify `cxx_toolchain_dir`", + config.target + ) + }; + + cpp_compiler.command_mut().args([ + &format!("--sysroot={cxx_toolchain_dir}/aarch64_be-none-linux-gnu/libc"), + "--include-directory", + &format!("{cxx_toolchain_dir}/aarch64_be-none-linux-gnu/include/c++/14.3.1"), + "--include-directory", + &format!("{cxx_toolchain_dir}/aarch64_be-none-linux-gnu/include/c++/14.3.1/aarch64_be-none-linux-gnu"), + "-L", + &format!("{cxx_toolchain_dir}/lib/gcc/aarch64_be-none-linux-gnu/14.3.1"), + "-L", + &format!("{cxx_toolchain_dir}/aarch64_be-none-linux-gnu/libc/usr/lib"), + "-B", + &format!("{cxx_toolchain_dir}/lib/gcc/aarch64_be-none-linux-gnu/14.3.1"), + ]); + } - compile_c_programs(&compiler_commands) + Some(cpp_compiler) } diff --git a/library/stdarch/crates/intrinsic-test/src/arm/config.rs b/library/stdarch/crates/intrinsic-test/src/arm/config.rs index cee80374ae9d8..9a7b37253d1cb 100644 --- a/library/stdarch/crates/intrinsic-test/src/arm/config.rs +++ b/library/stdarch/crates/intrinsic-test/src/arm/config.rs @@ -114,7 +114,6 @@ pub const AARCH_CONFIGURATIONS: &str = r#" #![cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), feature(stdarch_neon_fcma))] #![cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), feature(stdarch_neon_dotprod))] #![cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), feature(stdarch_neon_i8mm))] -#![cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), feature(stdarch_neon_sha3))] #![cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), feature(stdarch_neon_sm4))] #![cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), feature(stdarch_neon_ftts))] #![feature(fmt_helpers_for_derive)] diff --git a/library/stdarch/crates/intrinsic-test/src/arm/intrinsic.rs b/library/stdarch/crates/intrinsic-test/src/arm/intrinsic.rs index 773dabf4d75b1..16572b2c03f1a 100644 --- a/library/stdarch/crates/intrinsic-test/src/arm/intrinsic.rs +++ b/library/stdarch/crates/intrinsic-test/src/arm/intrinsic.rs @@ -1,8 +1,8 @@ use crate::common::argument::ArgumentList; use crate::common::indentation::Indentation; use crate::common::intrinsic::{Intrinsic, IntrinsicDefinition}; -use crate::common::intrinsic_helpers::{IntrinsicType, IntrinsicTypeDefinition, TypeKind}; -use std::ops::Deref; +use crate::common::intrinsic_helpers::{IntrinsicType, IntrinsicTypeDefinition, Sign, TypeKind}; +use std::ops::{Deref, DerefMut}; #[derive(Debug, Clone, PartialEq)] pub struct ArmIntrinsicType(pub IntrinsicType); @@ 
-15,6 +15,12 @@ impl Deref for ArmIntrinsicType { } } +impl DerefMut for ArmIntrinsicType { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + impl IntrinsicDefinition for Intrinsic { fn arguments(&self) -> ArgumentList { self.arguments.clone() @@ -73,8 +79,9 @@ impl IntrinsicDefinition for Intrinsic { TypeKind::Float if self.results().inner_size() == 16 => "float16_t".to_string(), TypeKind::Float if self.results().inner_size() == 32 => "float".to_string(), TypeKind::Float if self.results().inner_size() == 64 => "double".to_string(), - TypeKind::Int => format!("int{}_t", self.results().inner_size()), - TypeKind::UInt => format!("uint{}_t", self.results().inner_size()), + TypeKind::Int(Sign::Signed) => format!("int{}_t", self.results().inner_size()), + TypeKind::Int(Sign::Unsigned) => + format!("uint{}_t", self.results().inner_size()), TypeKind::Poly => format!("poly{}_t", self.results().inner_size()), ty => todo!("print_result_c - Unknown type: {:#?}", ty), }, diff --git a/library/stdarch/crates/intrinsic-test/src/arm/json_parser.rs b/library/stdarch/crates/intrinsic-test/src/arm/json_parser.rs index 0ac47484b0193..58d366c86a93a 100644 --- a/library/stdarch/crates/intrinsic-test/src/arm/json_parser.rs +++ b/library/stdarch/crates/intrinsic-test/src/arm/json_parser.rs @@ -110,7 +110,7 @@ fn json_to_intrinsic( Ok(Intrinsic { name, arguments, - results: *results, + results: results, arch_tags: intr.architectures, }) } diff --git a/library/stdarch/crates/intrinsic-test/src/arm/mod.rs b/library/stdarch/crates/intrinsic-test/src/arm/mod.rs index 6aaa49ff97f9b..0a64a24e73137 100644 --- a/library/stdarch/crates/intrinsic-test/src/arm/mod.rs +++ b/library/stdarch/crates/intrinsic-test/src/arm/mod.rs @@ -4,15 +4,20 @@ mod intrinsic; mod json_parser; mod types; +use std::fs::File; + +use rayon::prelude::*; + +use crate::arm::config::POLY128_OSTREAM_DEF; use crate::common::SupportedArchitectureTest; use crate::common::cli::ProcessedCli; use crate::common::compare::compare_outputs; +use crate::common::gen_c::{write_main_cpp, write_mod_cpp}; use crate::common::gen_rust::compile_rust_programs; use crate::common::intrinsic::{Intrinsic, IntrinsicDefinition}; use crate::common::intrinsic_helpers::TypeKind; -use crate::common::write_file::{write_c_testfiles, write_rust_testfiles}; -use compile::compile_c_arm; -use config::{AARCH_CONFIGURATIONS, F16_FORMATTING_DEF, POLY128_OSTREAM_DEF, build_notices}; +use crate::common::write_file::write_rust_testfiles; +use config::{AARCH_CONFIGURATIONS, F16_FORMATTING_DEF, build_notices}; use intrinsic::ArmIntrinsicType; use json_parser::get_neon_intrinsics; @@ -21,6 +26,13 @@ pub struct ArmArchitectureTest { cli_options: ProcessedCli, } +fn chunk_info(intrinsic_count: usize) -> (usize, usize) { + let available_parallelism = std::thread::available_parallelism().unwrap().get(); + let chunk_size = intrinsic_count.div_ceil(Ord::min(available_parallelism, intrinsic_count)); + + (chunk_size, intrinsic_count.div_ceil(chunk_size)) +} + impl SupportedArchitectureTest for ArmArchitectureTest { fn create(cli_options: ProcessedCli) -> Box { let a32 = cli_options.target.contains("v7"); @@ -51,33 +63,58 @@ impl SupportedArchitectureTest for ArmArchitectureTest { } fn build_c_file(&self) -> bool { - let compiler = self.cli_options.cpp_compiler.as_deref(); - let target = &self.cli_options.target; - let cxx_toolchain_dir = self.cli_options.cxx_toolchain_dir.as_deref(); let c_target = "aarch64"; + let platform_headers = &["arm_neon.h", "arm_acle.h", "arm_fp16.h"]; - 
let intrinsics_name_list = write_c_testfiles( - &self - .intrinsics - .iter() - .map(|i| i as &dyn IntrinsicDefinition<_>) - .collect::>(), - target, + let (chunk_size, chunk_count) = chunk_info(self.intrinsics.len()); + + let cpp_compiler = compile::build_cpp_compilation(&self.cli_options).unwrap(); + + let notice = &build_notices("// "); + self.intrinsics + .par_chunks(chunk_size) + .enumerate() + .map(|(i, chunk)| { + let c_filename = format!("c_programs/mod_{i}.cpp"); + let mut file = File::create(&c_filename).unwrap(); + write_mod_cpp(&mut file, notice, c_target, platform_headers, chunk).unwrap(); + + // compile this cpp file into a .o file + let output = cpp_compiler + .compile_object_file(&format!("mod_{i}.cpp"), &format!("mod_{i}.o"))?; + assert!(output.status.success(), "{output:?}"); + + Ok(()) + }) + .collect::>() + .unwrap(); + + let mut file = File::create("c_programs/main.cpp").unwrap(); + write_main_cpp( + &mut file, c_target, - &["arm_neon.h", "arm_acle.h", "arm_fp16.h"], - &build_notices("// "), - &[POLY128_OSTREAM_DEF], - ); + POLY128_OSTREAM_DEF, + self.intrinsics.iter().map(|i| i.name.as_str()), + ) + .unwrap(); - match compiler { - None => true, - Some(compiler) => compile_c_arm( - intrinsics_name_list.as_slice(), - compiler, - target, - cxx_toolchain_dir, - ), - } + // compile this cpp file into a .o file + info!("compiling main.cpp"); + let output = cpp_compiler + .compile_object_file("main.cpp", "intrinsic-test-programs.o") + .unwrap(); + assert!(output.status.success(), "{output:?}"); + + let object_files = (0..chunk_count) + .map(|i| format!("mod_{i}.o")) + .chain(["intrinsic-test-programs.o".to_owned()]); + + let output = cpp_compiler + .link_executable(object_files, "intrinsic-test-programs") + .unwrap(); + assert!(output.status.success(), "{output:?}"); + + true } fn build_rust_file(&self) -> bool { @@ -104,7 +141,7 @@ impl SupportedArchitectureTest for ArmArchitectureTest { } fn compare_outputs(&self) -> bool { - if let Some(ref toolchain) = self.cli_options.toolchain { + if self.cli_options.toolchain.is_some() { let intrinsics_name_list = self .intrinsics .iter() @@ -113,8 +150,7 @@ impl SupportedArchitectureTest for ArmArchitectureTest { compare_outputs( &intrinsics_name_list, - toolchain, - &self.cli_options.c_runner, + &self.cli_options.runner, &self.cli_options.target, ) } else { diff --git a/library/stdarch/crates/intrinsic-test/src/arm/types.rs b/library/stdarch/crates/intrinsic-test/src/arm/types.rs index 9f3d6302f460c..77f5e8d0e5602 100644 --- a/library/stdarch/crates/intrinsic-test/src/arm/types.rs +++ b/library/stdarch/crates/intrinsic-test/src/arm/types.rs @@ -1,6 +1,6 @@ use super::intrinsic::ArmIntrinsicType; use crate::common::cli::Language; -use crate::common::intrinsic_helpers::{IntrinsicType, IntrinsicTypeDefinition, TypeKind}; +use crate::common::intrinsic_helpers::{IntrinsicType, IntrinsicTypeDefinition, Sign, TypeKind}; impl IntrinsicTypeDefinition for ArmIntrinsicType { /// Gets a string containing the typename for this type in C format. 
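The rewritten build_c_file in arm/mod.rs above splits the intrinsic list into roughly one chunk per available core, writes each chunk to c_programs/mod_{i}.cpp, compiles the chunks to object files in parallel, and links them together with main.cpp at the end. A minimal sketch of the chunking arithmetic (not part of the patch; the parallelism is passed in explicitly here, whereas the patch's chunk_info queries std::thread::available_parallelism()):

    // Sketch mirroring the chunk_info helper added in arm/mod.rs.
    // chunk_size = ceil(count / min(parallelism, count)); the second value is
    // the number of mod_{i}.cpp / mod_{i}.o chunks that get produced.
    fn chunk_info_sketch(intrinsic_count: usize, parallelism: usize) -> (usize, usize) {
        let chunk_size = intrinsic_count.div_ceil(Ord::min(parallelism, intrinsic_count));
        (chunk_size, intrinsic_count.div_ceil(chunk_size))
    }

    fn main() {
        // 10 intrinsics on 4 cores: chunks of 3 intrinsics, 4 object files.
        assert_eq!(chunk_info_sketch(10, 4), (3, 4));
        // 1000 intrinsics on 8 cores: chunks of 125 intrinsics, 8 object files.
        assert_eq!(chunk_info_sketch(1000, 8), (125, 8));
    }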
@@ -73,8 +73,8 @@ impl IntrinsicTypeDefinition for ArmIntrinsicType { format!( "vld{len}{quad}_{type}{size}", type = match k { - TypeKind::UInt => "u", - TypeKind::Int => "s", + TypeKind::Int(Sign::Unsigned) => "u", + TypeKind::Int(Sign::Signed) => "s", TypeKind::Float => "f", // The ACLE doesn't support 64-bit polynomial loads on Armv7 // if armv7 and bl == 64, use "s", else "p" @@ -107,8 +107,8 @@ impl IntrinsicTypeDefinition for ArmIntrinsicType { format!( "vget{quad}_lane_{type}{size}", type = match k { - TypeKind::UInt => "u", - TypeKind::Int => "s", + TypeKind::Int(Sign::Unsigned) => "u", + TypeKind::Int(Sign::Signed) => "s", TypeKind::Float => "f", TypeKind::Poly => "p", x => todo!("get_load_function TypeKind: {:#?}", x), @@ -121,7 +121,7 @@ impl IntrinsicTypeDefinition for ArmIntrinsicType { } } - fn from_c(s: &str, target: &str) -> Result, String> { + fn from_c(s: &str, target: &str) -> Result { const CONST_STR: &str = "const"; if let Some(s) = s.strip_suffix('*') { let (s, constant) = match s.trim().strip_suffix(CONST_STR) { @@ -131,9 +131,8 @@ impl IntrinsicTypeDefinition for ArmIntrinsicType { let s = s.trim_end(); let temp_return = ArmIntrinsicType::from_c(s, target); temp_return.map(|mut op| { - let edited = op.as_mut(); - edited.0.ptr = true; - edited.0.ptr_constant = constant; + op.ptr = true; + op.ptr_constant = constant; op }) } else { @@ -163,7 +162,7 @@ impl IntrinsicTypeDefinition for ArmIntrinsicType { ), None => None, }; - Ok(Box::new(ArmIntrinsicType(IntrinsicType { + Ok(ArmIntrinsicType(IntrinsicType { ptr: false, ptr_constant: false, constant, @@ -172,14 +171,14 @@ impl IntrinsicTypeDefinition for ArmIntrinsicType { simd_len, vec_len, target: target.to_string(), - }))) + })) } else { let kind = start.parse::()?; let bit_len = match kind { - TypeKind::Int => Some(32), + TypeKind::Int(_) => Some(32), _ => None, }; - Ok(Box::new(ArmIntrinsicType(IntrinsicType { + Ok(ArmIntrinsicType(IntrinsicType { ptr: false, ptr_constant: false, constant, @@ -188,7 +187,7 @@ impl IntrinsicTypeDefinition for ArmIntrinsicType { simd_len: None, vec_len: None, target: target.to_string(), - }))) + })) } } } diff --git a/library/stdarch/crates/intrinsic-test/src/common/argument.rs b/library/stdarch/crates/intrinsic-test/src/common/argument.rs index 443ccb919f467..1df4f55995e4f 100644 --- a/library/stdarch/crates/intrinsic-test/src/common/argument.rs +++ b/library/stdarch/crates/intrinsic-test/src/common/argument.rs @@ -76,7 +76,7 @@ where Argument { pos, name: String::from(var_name), - ty: *ty, + ty: ty, constraint, } } @@ -125,19 +125,23 @@ where /// Creates a line for each argument that initializes an array for C from which `loads` argument /// values can be loaded as a sliding window. /// e.g `const int32x2_t a_vals = {0x3effffff, 0x3effffff, 0x3f7fffff}`, if loads=2. 
- pub fn gen_arglists_c(&self, indentation: Indentation, loads: u32) -> String { - self.iter() - .filter(|&arg| !arg.has_constraint()) - .map(|arg| { - format!( - "{indentation}const {ty} {name}_vals[] = {values};", - ty = arg.ty.c_scalar_type(), - name = arg.name, - values = arg.ty.populate_random(indentation, loads, &Language::C) - ) - }) - .collect::>() - .join("\n") + pub fn gen_arglists_c( + &self, + w: &mut impl std::io::Write, + indentation: Indentation, + loads: u32, + ) -> std::io::Result<()> { + for arg in self.iter().filter(|&arg| !arg.has_constraint()) { + writeln!( + w, + "{indentation}const {ty} {name}_vals[] = {values};", + ty = arg.ty.c_scalar_type(), + name = arg.name, + values = arg.ty.populate_random(indentation, loads, &Language::C) + )? + } + + Ok(()) } /// Creates a line for each argument that initializes an array for Rust from which `loads` argument diff --git a/library/stdarch/crates/intrinsic-test/src/common/cli.rs b/library/stdarch/crates/intrinsic-test/src/common/cli.rs index 1d572723008df..beae6a4b044da 100644 --- a/library/stdarch/crates/intrinsic-test/src/common/cli.rs +++ b/library/stdarch/crates/intrinsic-test/src/common/cli.rs @@ -60,7 +60,7 @@ pub struct ProcessedCli { pub filename: PathBuf, pub toolchain: Option, pub cpp_compiler: Option, - pub c_runner: String, + pub runner: String, pub target: String, pub linker: Option, pub cxx_toolchain_dir: Option, @@ -70,7 +70,7 @@ pub struct ProcessedCli { impl ProcessedCli { pub fn new(cli_options: Cli) -> Self { let filename = cli_options.input; - let c_runner = cli_options.runner.unwrap_or_default(); + let runner = cli_options.runner.unwrap_or_default(); let target = cli_options.target; let linker = cli_options.linker; let cxx_toolchain_dir = cli_options.cxx_toolchain_dir; @@ -102,7 +102,7 @@ impl ProcessedCli { Self { toolchain, cpp_compiler, - c_runner, + runner, target, linker, cxx_toolchain_dir, diff --git a/library/stdarch/crates/intrinsic-test/src/common/compare.rs b/library/stdarch/crates/intrinsic-test/src/common/compare.rs index 9e0cbe8cd6abe..cb55922eb199b 100644 --- a/library/stdarch/crates/intrinsic-test/src/common/compare.rs +++ b/library/stdarch/crates/intrinsic-test/src/common/compare.rs @@ -2,27 +2,25 @@ use super::cli::FailureReason; use rayon::prelude::*; use std::process::Command; -pub fn compare_outputs( - intrinsic_name_list: &Vec, - toolchain: &str, - runner: &str, - target: &str, -) -> bool { +pub fn compare_outputs(intrinsic_name_list: &Vec, runner: &str, target: &str) -> bool { + fn runner_command(runner: &str) -> Command { + let mut it = runner.split_whitespace(); + let mut cmd = Command::new(it.next().unwrap()); + cmd.args(it); + + cmd + } + let intrinsics = intrinsic_name_list .par_iter() .filter_map(|intrinsic_name| { - let c = Command::new("sh") - .arg("-c") - .arg(format!("{runner} ./c_programs/{intrinsic_name}")) + let c = runner_command(runner) + .arg("./c_programs/intrinsic-test-programs") + .arg(intrinsic_name) .output(); - let rust = Command::new("sh") - .current_dir("rust_programs") - .arg("-c") - .arg(format!( - "cargo {toolchain} run --target {target} --bin {intrinsic_name} --release", - )) - .env("RUSTFLAGS", "-Cdebuginfo=0") + let rust = runner_command(runner) + .arg(format!("target/{target}/release/{intrinsic_name}")) .output(); let (c, rust) = match (c, rust) { @@ -42,8 +40,8 @@ pub fn compare_outputs( if !rust.status.success() { error!( "Failed to run Rust program for intrinsic {intrinsic_name}\nstdout: {stdout}\nstderr: {stderr}", - stdout = 
std::str::from_utf8(&rust.stdout).unwrap_or(""), - stderr = std::str::from_utf8(&rust.stderr).unwrap_or(""), + stdout = String::from_utf8_lossy(&rust.stdout), + stderr = String::from_utf8_lossy(&rust.stderr), ); return Some(FailureReason::RunRust(intrinsic_name.clone())); } diff --git a/library/stdarch/crates/intrinsic-test/src/common/compile_c.rs b/library/stdarch/crates/intrinsic-test/src/common/compile_c.rs index aebb7b111e28c..0c905a149e48c 100644 --- a/library/stdarch/crates/intrinsic-test/src/common/compile_c.rs +++ b/library/stdarch/crates/intrinsic-test/src/common/compile_c.rs @@ -5,11 +5,7 @@ pub struct CompilationCommandBuilder { cxx_toolchain_dir: Option, arch_flags: Vec, optimization: String, - include_paths: Vec, project_root: Option, - output: String, - input: String, - linker: Option, extra_flags: Vec, } @@ -21,11 +17,7 @@ impl CompilationCommandBuilder { cxx_toolchain_dir: None, arch_flags: Vec::new(), optimization: "2".to_string(), - include_paths: Vec::new(), project_root: None, - output: String::new(), - input: String::new(), - linker: None, extra_flags: Vec::new(), } } @@ -57,37 +49,12 @@ impl CompilationCommandBuilder { self } - /// Sets a list of include paths for compilation. - /// The paths that are passed must be relative to the - /// "cxx_toolchain_dir" directory path. - pub fn set_include_paths(mut self, paths: Vec<&str>) -> Self { - self.include_paths = paths.into_iter().map(|path| path.to_string()).collect(); - self - } - /// Sets the root path of all the generated test files. pub fn set_project_root(mut self, path: &str) -> Self { self.project_root = Some(path.to_string()); self } - /// The name of the output executable, without any suffixes - pub fn set_output_name(mut self, path: &str) -> Self { - self.output = path.to_string(); - self - } - - /// The name of the input C file, without any suffixes - pub fn set_input_name(mut self, path: &str) -> Self { - self.input = path.to_string(); - self - } - - pub fn set_linker(mut self, linker: String) -> Self { - self.linker = Some(linker); - self - } - pub fn add_extra_flags(mut self, flags: Vec<&str>) -> Self { let mut flags: Vec = flags.into_iter().map(|f| f.to_string()).collect(); self.extra_flags.append(&mut flags); @@ -100,55 +67,69 @@ impl CompilationCommandBuilder { } impl CompilationCommandBuilder { - pub fn make_string(self) -> String { - let arch_flags = self.arch_flags.join("+"); + pub fn into_cpp_compilation(self) -> CppCompilation { + let mut cpp_compiler = std::process::Command::new(self.compiler); + + if let Some(project_root) = self.project_root { + cpp_compiler.current_dir(project_root); + } + let flags = std::env::var("CPPFLAGS").unwrap_or("".into()); - let project_root = self.project_root.unwrap_or_default(); - let project_root_str = project_root.as_str(); - let mut output = self.output.clone(); - if self.linker.is_some() { - output += ".o" - }; - let mut command = format!( - "{} {flags} -march={arch_flags} \ - -O{} \ - -o {project_root}/{} \ - {project_root}/{}.cpp", - self.compiler, self.optimization, output, self.input, - ); - - command = command + " " + self.extra_flags.join(" ").as_str(); + cpp_compiler.args(flags.split_whitespace()); + + cpp_compiler.arg(format!("-march={}", self.arch_flags.join("+"))); + + cpp_compiler.arg(format!("-O{}", self.optimization)); + + cpp_compiler.args(self.extra_flags); if let Some(target) = &self.target { - command = command + " --target=" + target; + cpp_compiler.arg(format!("--target={target}")); } - if let (Some(linker), Some(cxx_toolchain_dir)) = 
(&self.linker, &self.cxx_toolchain_dir) { - let include_args = self - .include_paths - .iter() - .map(|path| "--include-directory=".to_string() + cxx_toolchain_dir + path) - .collect::>() - .join(" "); - - command = command - + " -c " - + include_args.as_str() - + " && " - + linker - + " " - + project_root_str - + "/" - + &output - + " -o " - + project_root_str - + "/" - + &self.output - + " && rm " - + project_root_str - + "/" - + &output; - } - command + CppCompilation(cpp_compiler) + } +} + +pub struct CppCompilation(std::process::Command); + +fn clone_command(command: &std::process::Command) -> std::process::Command { + let mut cmd = std::process::Command::new(command.get_program()); + if let Some(current_dir) = command.get_current_dir() { + cmd.current_dir(current_dir); + } + cmd.args(command.get_args()); + + for (key, val) in command.get_envs() { + cmd.env(key, val.unwrap_or_default()); + } + + cmd +} + +impl CppCompilation { + pub fn command_mut(&mut self) -> &mut std::process::Command { + &mut self.0 + } + + pub fn compile_object_file( + &self, + input: &str, + output: &str, + ) -> std::io::Result { + let mut cmd = clone_command(&self.0); + cmd.args([input, "-c", "-o", output]); + cmd.output() + } + + pub fn link_executable( + &self, + inputs: impl Iterator, + output: &str, + ) -> std::io::Result { + let mut cmd = clone_command(&self.0); + cmd.args(inputs); + cmd.args(["-o", output]); + cmd.output() } } diff --git a/library/stdarch/crates/intrinsic-test/src/common/gen_c.rs b/library/stdarch/crates/intrinsic-test/src/common/gen_c.rs index 1cfb66c39b90f..905efb6d89096 100644 --- a/library/stdarch/crates/intrinsic-test/src/common/gen_c.rs +++ b/library/stdarch/crates/intrinsic-test/src/common/gen_c.rs @@ -1,8 +1,3 @@ -use itertools::Itertools; -use rayon::prelude::*; -use std::collections::BTreeMap; -use std::process::Command; - use super::argument::Argument; use super::indentation::Indentation; use super::intrinsic::IntrinsicDefinition; @@ -11,104 +6,16 @@ use super::intrinsic_helpers::IntrinsicTypeDefinition; // The number of times each intrinsic will be called. 
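The CppCompilation type added in compile_c.rs above replaces the old per-intrinsic shell command string with a reusable std::process::Command: the builder bakes in the compiler, -march string, optimization level and extra flags once, and callers then invoke compile_object_file for each generated source file and link_executable for the final test binary. A hedged usage sketch, combining the builder methods kept from the old API with the new into_cpp_compilation / compile_object_file / link_executable calls (the compiler, target and flags below are illustrative, not taken from the patch):

    // Sketch only: shows the intended compile-then-link call sequence.
    fn build_sketch() -> std::io::Result<()> {
        let cpp = CompilationCommandBuilder::new()
            .set_compiler("clang++")                      // illustrative compiler
            .set_target("aarch64-unknown-linux-gnu")      // illustrative target
            .set_opt_level("2")
            .set_project_root("c_programs")
            .add_extra_flags(vec!["-ffp-contract=off"])
            .into_cpp_compilation();

        // One object file per generated chunk...
        let obj = cpp.compile_object_file("mod_0.cpp", "mod_0.o")?;
        assert!(obj.status.success(), "{obj:?}");

        // ...then a single link step for the test binary.
        let exe = cpp.link_executable(["mod_0.o".to_string()].into_iter(), "intrinsic-test-programs")?;
        assert!(exe.status.success(), "{exe:?}");
        Ok(())
    }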
const PASSES: u32 = 20; -// Formats the main C program template with placeholders -pub fn format_c_main_template( - notices: &str, - header_files: &[&str], - arch_identifier: &str, - arch_specific_definitions: &[&str], - arglists: &str, - passes: &str, -) -> String { - format!( - r#"{notices}{header_files} -#include -#include -#include -#include - -template T1 cast(T2 x) {{ - static_assert(sizeof(T1) == sizeof(T2), "sizeof T1 and T2 must be the same"); - T1 ret{{}}; - memcpy(&ret, &x, sizeof(T1)); - return ret; -}} - -std::ostream& operator<<(std::ostream& os, float16_t value) {{ - uint16_t temp = 0; - memcpy(&temp, &value, sizeof(float16_t)); - std::stringstream ss; - ss << "0x" << std::setfill('0') << std::setw(4) << std::hex << temp; - os << ss.str(); - return os; -}} - -#ifdef __{arch_identifier}__ -{arch_specific_definitions} -#endif - -{arglists} - -int main(int argc, char **argv) {{ -{passes} - return 0; -}}"#, - header_files = header_files - .iter() - .map(|header| format!("#include <{header}>")) - .collect::>() - .join("\n"), - arch_specific_definitions = arch_specific_definitions.join("\n"), - ) -} - -pub fn compile_c_programs(compiler_commands: &[String]) -> bool { - compiler_commands - .par_iter() - .map(|compiler_command| { - let output = Command::new("sh").arg("-c").arg(compiler_command).output(); - if let Ok(output) = output { - if output.status.success() { - true - } else { - error!( - "Failed to compile code for intrinsics: \n\nstdout:\n{}\n\nstderr:\n{}", - std::str::from_utf8(&output.stdout).unwrap_or(""), - std::str::from_utf8(&output.stderr).unwrap_or("") - ); - false - } - } else { - error!("Command failed: {output:#?}"); - false - } - }) - .find_any(|x| !x) - .is_none() -} - -// Creates directory structure and file path mappings -pub fn setup_c_file_paths(identifiers: &Vec) -> BTreeMap<&String, String> { - let _ = std::fs::create_dir("c_programs"); - identifiers - .par_iter() - .map(|identifier| { - let c_filename = format!(r#"c_programs/{identifier}.cpp"#); - - (identifier, c_filename) - }) - .collect::>() -} - pub fn generate_c_test_loop( + w: &mut impl std::io::Write, intrinsic: &dyn IntrinsicDefinition, indentation: Indentation, additional: &str, passes: u32, - _target: &str, -) -> String { +) -> std::io::Result<()> { let body_indentation = indentation.nested(); - format!( + writeln!( + w, "{indentation}for (int i=0; i<{passes}; i++) {{\n\ {loaded_args}\ {body_indentation}auto __return_value = {intrinsic_call}({args});\n\ @@ -121,78 +28,172 @@ pub fn generate_c_test_loop( ) } -pub fn generate_c_constraint_blocks( +pub fn generate_c_constraint_blocks<'a, T: IntrinsicTypeDefinition + 'a>( + w: &mut impl std::io::Write, intrinsic: &dyn IntrinsicDefinition, indentation: Indentation, - constraints: &[&Argument], + constraints: &mut (impl Iterator> + Clone), name: String, - target: &str, -) -> String { - if let Some((current, constraints)) = constraints.split_last() { - let range = current - .constraint - .iter() - .map(|c| c.to_range()) - .flat_map(|r| r.into_iter()); - - let body_indentation = indentation.nested(); - range - .map(|i| { - format!( - "{indentation}{{\n\ - {body_indentation}{ty} {name} = {val};\n\ - {pass}\n\ - {indentation}}}", - name = current.name, - ty = current.ty.c_type(), - val = i, - pass = generate_c_constraint_blocks( - intrinsic, - body_indentation, - constraints, - format!("{name}-{i}"), - target, - ) - ) - }) - .join("\n") - } else { - generate_c_test_loop(intrinsic, indentation, &name, PASSES, target) +) -> std::io::Result<()> { + let 
Some(current) = constraints.next() else { + return generate_c_test_loop(w, intrinsic, indentation, &name, PASSES); + }; + + let body_indentation = indentation.nested(); + for i in current.constraint.iter().flat_map(|c| c.to_range()) { + let ty = current.ty.c_type(); + + writeln!(w, "{indentation}{{")?; + writeln!(w, "{body_indentation}{ty} {} = {i};", current.name)?; + + generate_c_constraint_blocks( + w, + intrinsic, + body_indentation, + &mut constraints.clone(), + format!("{name}-{i}"), + )?; + + writeln!(w, "{indentation}}}")?; } + + Ok(()) } // Compiles C test programs using specified compiler -pub fn create_c_test_program( +pub fn create_c_test_function( + w: &mut impl std::io::Write, intrinsic: &dyn IntrinsicDefinition, - header_files: &[&str], - target: &str, - c_target: &str, - notices: &str, - arch_specific_definitions: &[&str], -) -> String { +) -> std::io::Result<()> { + let indentation = Indentation::default(); + + writeln!(w, "int run_{}() {{", intrinsic.name())?; + + // Define the arrays of arguments. let arguments = intrinsic.arguments(); - let constraints = arguments - .iter() - .filter(|&i| i.has_constraint()) - .collect_vec(); + arguments.gen_arglists_c(w, indentation.nested(), PASSES)?; - let indentation = Indentation::default(); - format_c_main_template( - notices, - header_files, - c_target, - arch_specific_definitions, - intrinsic - .arguments() - .gen_arglists_c(indentation, PASSES) - .as_str(), - generate_c_constraint_blocks( - intrinsic, - indentation.nested(), - constraints.as_slice(), - Default::default(), - target, - ) - .as_str(), - ) + generate_c_constraint_blocks( + w, + intrinsic, + indentation.nested(), + &mut arguments.iter().rev().filter(|&i| i.has_constraint()), + Default::default(), + )?; + + writeln!(w, " return 0;")?; + writeln!(w, "}}")?; + + Ok(()) +} + +pub fn write_mod_cpp( + w: &mut impl std::io::Write, + notice: &str, + architecture: &str, + platform_headers: &[&str], + intrinsics: &[impl IntrinsicDefinition], +) -> std::io::Result<()> { + write!(w, "{notice}")?; + + for header in platform_headers { + writeln!(w, "#include <{header}>")?; + } + + writeln!( + w, + r#" +#include +#include +#include +#include + +template T1 cast(T2 x) {{ + static_assert(sizeof(T1) == sizeof(T2), "sizeof T1 and T2 must be the same"); + T1 ret{{}}; + memcpy(&ret, &x, sizeof(T1)); + return ret; +}} + +std::ostream& operator<<(std::ostream& os, float16_t value); + + + +"# + )?; + + writeln!(w, "#ifdef __{architecture}__")?; + writeln!( + w, + "std::ostream& operator<<(std::ostream& os, poly128_t value);" + )?; + writeln!(w, "#endif")?; + + for intrinsic in intrinsics { + create_c_test_function(w, intrinsic)?; + } + + Ok(()) +} + +pub fn write_main_cpp<'a>( + w: &mut impl std::io::Write, + architecture: &str, + arch_specific_definitions: &str, + intrinsics: impl Iterator + Clone, +) -> std::io::Result<()> { + writeln!(w, "#include ")?; + writeln!(w, "#include ")?; + + for header in ["arm_neon.h", "arm_acle.h", "arm_fp16.h"] { + writeln!(w, "#include <{header}>")?; + } + + writeln!( + w, + r#" +#include +#include +#include + +std::ostream& operator<<(std::ostream& os, float16_t value) {{ + uint16_t temp = 0; + memcpy(&temp, &value, sizeof(float16_t)); + std::stringstream ss; + ss << "0x" << std::setfill('0') << std::setw(4) << std::hex << temp; + os << ss.str(); + return os; +}} +"# + )?; + + writeln!(w, "#ifdef __{architecture}__")?; + writeln!(w, "{arch_specific_definitions }")?; + writeln!(w, "#endif")?; + + for intrinsic in intrinsics.clone() { + writeln!(w, "extern 
int run_{intrinsic}(void);")?; + } + + writeln!(w, "int main(int argc, char **argv) {{")?; + writeln!(w, " std::string intrinsic_name = argv[1];")?; + + writeln!(w, " if (false) {{")?; + + for intrinsic in intrinsics { + writeln!(w, " }} else if (intrinsic_name == \"{intrinsic}\") {{")?; + writeln!(w, " return run_{intrinsic}();")?; + } + + writeln!(w, " }} else {{")?; + writeln!( + w, + " std::cerr << \"Unknown command: \" << intrinsic_name << \"\\n\";" + )?; + writeln!(w, " return -1;")?; + writeln!(w, " }}")?; + + writeln!(w, "}}")?; + + Ok(()) } diff --git a/library/stdarch/crates/intrinsic-test/src/common/gen_rust.rs b/library/stdarch/crates/intrinsic-test/src/common/gen_rust.rs index 52bccaf905c51..0e4a95ab528a0 100644 --- a/library/stdarch/crates/intrinsic-test/src/common/gen_rust.rs +++ b/library/stdarch/crates/intrinsic-test/src/common/gen_rust.rs @@ -2,7 +2,6 @@ use itertools::Itertools; use rayon::prelude::*; use std::collections::BTreeMap; use std::fs::File; -use std::io::Write; use std::process::Command; use super::argument::Argument; @@ -23,8 +22,8 @@ pub fn format_rust_main_template( ) -> String { format!( r#"{notices}#![feature(simd_ffi)] -#![feature(link_llvm_intrinsics)] #![feature(f16)] +#![allow(unused)] {configurations} {definitions} @@ -38,6 +37,42 @@ fn main() {{ ) } +fn write_cargo_toml(w: &mut impl std::io::Write, binaries: &[String]) -> std::io::Result<()> { + writeln!( + w, + concat!( + "[package]\n", + "name = \"intrinsic-test-programs\"\n", + "version = \"{version}\"\n", + "authors = [{authors}]\n", + "license = \"{license}\"\n", + "edition = \"2018\"\n", + "[workspace]\n", + "[dependencies]\n", + "core_arch = {{ path = \"../crates/core_arch\" }}", + ), + version = env!("CARGO_PKG_VERSION"), + authors = env!("CARGO_PKG_AUTHORS") + .split(":") + .format_with(", ", |author, fmt| fmt(&format_args!("\"{author}\""))), + license = env!("CARGO_PKG_LICENSE"), + )?; + + for binary in binaries { + writeln!( + w, + concat!( + "[[bin]]\n", + "name = \"{binary}\"\n", + "path = \"{binary}/main.rs\"\n", + ), + binary = binary, + )?; + } + + Ok(()) +} + pub fn compile_rust_programs( binaries: Vec, toolchain: Option<&str>, @@ -45,56 +80,20 @@ pub fn compile_rust_programs( linker: Option<&str>, ) -> bool { let mut cargo = File::create("rust_programs/Cargo.toml").unwrap(); - cargo - .write_all( - format!( - r#"[package] -name = "intrinsic-test-programs" -version = "{version}" -authors = [{authors}] -license = "{license}" -edition = "2018" -[workspace] -[dependencies] -core_arch = {{ path = "../crates/core_arch" }} -{binaries}"#, - version = env!("CARGO_PKG_VERSION"), - authors = env!("CARGO_PKG_AUTHORS") - .split(":") - .format_with(", ", |author, fmt| fmt(&format_args!("\"{author}\""))), - license = env!("CARGO_PKG_LICENSE"), - binaries = binaries - .iter() - .map(|binary| { - format!( - r#"[[bin]] -name = "{binary}" -path = "{binary}/main.rs""#, - ) - }) - .collect::>() - .join("\n") - ) - .into_bytes() - .as_slice(), - ) - .unwrap(); - - let toolchain = match toolchain { - None => return true, - Some(t) => t, - }; + write_cargo_toml(&mut cargo, &binaries).unwrap(); /* If there has been a linker explicitly set from the command line then * we want to set it via setting it in the RUSTFLAGS*/ - let cargo_command = format!("cargo {toolchain} build --target {target} --release"); + let mut cargo_command = Command::new("cargo"); + cargo_command.current_dir("rust_programs"); - let mut command = Command::new("sh"); - command - .current_dir("rust_programs") - .arg("-c") - 
.arg(cargo_command); + if let Some(toolchain) = toolchain { + if !toolchain.is_empty() { + cargo_command.arg(toolchain); + } + } + cargo_command.args(["build", "--target", target, "--release"]); let mut rust_flags = "-Cdebuginfo=0".to_string(); if let Some(linker) = linker { @@ -102,11 +101,11 @@ path = "{binary}/main.rs""#, rust_flags.push_str(linker); rust_flags.push_str(" -C link-args=-static"); - command.env("CPPFLAGS", "-fuse-ld=lld"); + cargo_command.env("CPPFLAGS", "-fuse-ld=lld"); } - command.env("RUSTFLAGS", rust_flags); - let output = command.output(); + cargo_command.env("RUSTFLAGS", rust_flags); + let output = cargo_command.output(); if let Ok(output) = output { if output.status.success() { diff --git a/library/stdarch/crates/intrinsic-test/src/common/intrinsic_helpers.rs b/library/stdarch/crates/intrinsic-test/src/common/intrinsic_helpers.rs index 3d200b19461e1..697f9c8754da1 100644 --- a/library/stdarch/crates/intrinsic-test/src/common/intrinsic_helpers.rs +++ b/library/stdarch/crates/intrinsic-test/src/common/intrinsic_helpers.rs @@ -8,14 +8,22 @@ use super::cli::Language; use super::indentation::Indentation; use super::values::value_for_array; +#[derive(Debug, PartialEq, Copy, Clone)] +pub enum Sign { + Signed, + Unsigned, +} + #[derive(Debug, PartialEq, Copy, Clone)] pub enum TypeKind { BFloat, Float, - Int, - UInt, + Int(Sign), + Char(Sign), Poly, Void, + Mask, + Vector, } impl FromStr for TypeKind { @@ -23,12 +31,17 @@ impl FromStr for TypeKind { fn from_str(s: &str) -> Result { match s { - "bfloat" => Ok(Self::BFloat), - "float" => Ok(Self::Float), - "int" => Ok(Self::Int), + "bfloat" | "BF16" => Ok(Self::BFloat), + "float" | "double" | "FP16" | "FP32" | "FP64" => Ok(Self::Float), + "int" | "long" | "short" | "SI8" | "SI16" | "SI32" | "SI64" => { + Ok(Self::Int(Sign::Signed)) + } "poly" => Ok(Self::Poly), - "uint" | "unsigned" => Ok(Self::UInt), + "char" => Ok(Self::Char(Sign::Signed)), + "uint" | "unsigned" | "UI8" | "UI16" | "UI32" | "UI64" => Ok(Self::Int(Sign::Unsigned)), "void" => Ok(Self::Void), + "MASK" => Ok(Self::Mask), + "M64" | "M128" | "M256" | "M512" => Ok(Self::Vector), _ => Err(format!("Impossible to parse argument kind {s}")), } } @@ -42,10 +55,14 @@ impl fmt::Display for TypeKind { match self { Self::BFloat => "bfloat", Self::Float => "float", - Self::Int => "int", - Self::UInt => "uint", + Self::Int(Sign::Signed) => "int", + Self::Int(Sign::Unsigned) => "uint", Self::Poly => "poly", Self::Void => "void", + Self::Char(Sign::Signed) => "char", + Self::Char(Sign::Unsigned) => "unsigned char", + Self::Mask => "mask", + Self::Vector => "vector", } ) } @@ -56,9 +73,10 @@ impl TypeKind { pub fn c_prefix(&self) -> &str { match self { Self::Float => "float", - Self::Int => "int", - Self::UInt => "uint", + Self::Int(Sign::Signed) => "int", + Self::Int(Sign::Unsigned) => "uint", Self::Poly => "poly", + Self::Char(Sign::Signed) => "char", _ => unreachable!("Not used: {:#?}", self), } } @@ -66,10 +84,13 @@ impl TypeKind { /// Gets the rust prefix for the type kind i.e. i, u, f. 
pub fn rust_prefix(&self) -> &str { match self { + Self::BFloat => "bf", Self::Float => "f", - Self::Int => "i", - Self::UInt => "u", + Self::Int(Sign::Signed) => "i", + Self::Int(Sign::Unsigned) => "u", Self::Poly => "u", + Self::Char(Sign::Unsigned) => "u", + Self::Char(Sign::Signed) => "i", _ => unreachable!("Unused type kind: {:#?}", self), } } @@ -133,11 +154,14 @@ impl IntrinsicType { } pub fn c_scalar_type(&self) -> String { - format!( - "{prefix}{bits}_t", - prefix = self.kind().c_prefix(), - bits = self.inner_size() - ) + match self.kind() { + TypeKind::Char(_) => String::from("char"), + _ => format!( + "{prefix}{bits}_t", + prefix = self.kind().c_prefix(), + bits = self.inner_size() + ), + } } pub fn rust_scalar_type(&self) -> String { @@ -155,8 +179,8 @@ impl IntrinsicType { bit_len: Some(8), .. } => match kind { - TypeKind::Int => "(int)", - TypeKind::UInt => "(unsigned int)", + TypeKind::Int(Sign::Signed) => "(int)", + TypeKind::Int(Sign::Unsigned) => "(unsigned int)", TypeKind::Poly => "(unsigned int)(uint8_t)", _ => "", }, @@ -172,6 +196,21 @@ impl IntrinsicType { 128 => "", _ => panic!("invalid bit_len"), }, + IntrinsicType { + kind: TypeKind::Float, + bit_len: Some(bit_len), + .. + } => match bit_len { + 16 => "(float16_t)", + 32 => "(float)", + 64 => "(double)", + 128 => "", + _ => panic!("invalid bit_len"), + }, + IntrinsicType { + kind: TypeKind::Char(_), + .. + } => "(char)", _ => "", } } @@ -185,7 +224,7 @@ impl IntrinsicType { match self { IntrinsicType { bit_len: Some(bit_len @ (8 | 16 | 32 | 64)), - kind: kind @ (TypeKind::Int | TypeKind::UInt | TypeKind::Poly), + kind: kind @ (TypeKind::Int(_) | TypeKind::Poly | TypeKind::Char(_)), simd_len, vec_len, .. @@ -201,7 +240,8 @@ impl IntrinsicType { .format_with(",\n", |i, fmt| { let src = value_for_array(*bit_len, i); assert!(src == 0 || src.ilog2() < *bit_len); - if *kind == TypeKind::Int && (src >> (*bit_len - 1)) != 0 { + if *kind == TypeKind::Int(Sign::Signed) && (src >> (*bit_len - 1)) != 0 + { // `src` is a two's complement representation of a negative value. let mask = !0u64 >> (64 - *bit_len); let ones_compl = src ^ mask; @@ -257,7 +297,7 @@ impl IntrinsicType { .. } => false, IntrinsicType { - kind: TypeKind::Int | TypeKind::UInt | TypeKind::Poly, + kind: TypeKind::Int(_) | TypeKind::Poly, .. } => true, _ => unimplemented!(), @@ -282,7 +322,9 @@ pub trait IntrinsicTypeDefinition: Deref { fn get_lane_function(&self) -> String; /// can be implemented in an `impl` block - fn from_c(_s: &str, _target: &str) -> Result, String>; + fn from_c(_s: &str, _target: &str) -> Result + where + Self: Sized; /// Gets a string containing the typename for this type in C format. 
/// can be directly defined in `impl` blocks diff --git a/library/stdarch/crates/intrinsic-test/src/common/write_file.rs b/library/stdarch/crates/intrinsic-test/src/common/write_file.rs index 0ba3e829a6b80..92dd70b7c578a 100644 --- a/library/stdarch/crates/intrinsic-test/src/common/write_file.rs +++ b/library/stdarch/crates/intrinsic-test/src/common/write_file.rs @@ -1,5 +1,3 @@ -use super::gen_c::create_c_test_program; -use super::gen_c::setup_c_file_paths; use super::gen_rust::{create_rust_test_program, setup_rust_file_paths}; use super::intrinsic::IntrinsicDefinition; use super::intrinsic_helpers::IntrinsicTypeDefinition; @@ -11,37 +9,6 @@ pub fn write_file(filename: &String, code: String) { file.write_all(code.into_bytes().as_slice()).unwrap(); } -pub fn write_c_testfiles( - intrinsics: &Vec<&dyn IntrinsicDefinition>, - target: &str, - c_target: &str, - headers: &[&str], - notice: &str, - arch_specific_definitions: &[&str], -) -> Vec { - let intrinsics_name_list = intrinsics - .iter() - .map(|i| i.name().clone()) - .collect::>(); - let filename_mapping = setup_c_file_paths(&intrinsics_name_list); - - intrinsics.iter().for_each(|&i| { - let c_code = create_c_test_program( - i, - headers, - target, - c_target, - notice, - arch_specific_definitions, - ); - if let Some(filename) = filename_mapping.get(&i.name()) { - write_file(filename, c_code) - }; - }); - - intrinsics_name_list -} - pub fn write_rust_testfiles( intrinsics: Vec<&dyn IntrinsicDefinition>, rust_target: &str, diff --git a/library/stdarch/crates/intrinsic-test/src/main.rs b/library/stdarch/crates/intrinsic-test/src/main.rs index 054138a0dba1a..538f317a2978b 100644 --- a/library/stdarch/crates/intrinsic-test/src/main.rs +++ b/library/stdarch/crates/intrinsic-test/src/main.rs @@ -30,12 +30,15 @@ fn main() { let test_environment = test_environment_result.unwrap(); + info!("building C binaries"); if !test_environment.build_c_file() { std::process::exit(2); } + info!("building Rust binaries"); if !test_environment.build_rust_file() { std::process::exit(3); } + info!("comaparing outputs"); if !test_environment.compare_outputs() { std::process::exit(1); } diff --git a/library/stdarch/crates/simd-test-macro/src/lib.rs b/library/stdarch/crates/simd-test-macro/src/lib.rs index 855e969e1eb79..b18e2d6b63e54 100644 --- a/library/stdarch/crates/simd-test-macro/src/lib.rs +++ b/library/stdarch/crates/simd-test-macro/src/lib.rs @@ -57,12 +57,12 @@ pub fn simd_test( .unwrap_or_else(|| panic!("target triple contained no \"-\": {target}")) { "i686" | "x86_64" | "i586" => "is_x86_feature_detected", - "arm" | "armv7" => "is_arm_feature_detected", + "arm" | "armv7" | "thumbv7neon" => "is_arm_feature_detected", "aarch64" | "arm64ec" | "aarch64_be" => "is_aarch64_feature_detected", maybe_riscv if maybe_riscv.starts_with("riscv") => "is_riscv_feature_detected", "powerpc" | "powerpcle" => "is_powerpc_feature_detected", "powerpc64" | "powerpc64le" => "is_powerpc64_feature_detected", - "loongarch64" => "is_loongarch_feature_detected", + "loongarch32" | "loongarch64" => "is_loongarch_feature_detected", "s390x" => "is_s390x_feature_detected", t => panic!("unknown target: {t}"), }; diff --git a/library/stdarch/crates/std_detect/LICENSE-APACHE b/library/stdarch/crates/std_detect/LICENSE-APACHE deleted file mode 100644 index 16fe87b06e802..0000000000000 --- a/library/stdarch/crates/std_detect/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, 
REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff --git a/library/stdarch/crates/std_detect/LICENSE-MIT b/library/stdarch/crates/std_detect/LICENSE-MIT deleted file mode 100644 index 52d82415d8b60..0000000000000 --- a/library/stdarch/crates/std_detect/LICENSE-MIT +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2017 The Rust Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff --git a/library/stdarch/crates/stdarch-gen-arm/Cargo.toml b/library/stdarch/crates/stdarch-gen-arm/Cargo.toml index 899296d25ea7e..312019f454cb0 100644 --- a/library/stdarch/crates/stdarch-gen-arm/Cargo.toml +++ b/library/stdarch/crates/stdarch-gen-arm/Cargo.toml @@ -13,7 +13,6 @@ edition = "2024" [dependencies] itertools = "0.14.0" -lazy_static = "1.4.0" proc-macro2 = "1.0" quote = "1.0" regex = "1.5" diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml b/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml index f0dce681d9c30..a31613e6b1aef 100644 --- a/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml +++ b/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml @@ -187,7 +187,7 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: [*neon-stable] - assert_instr: [sabdl] + assert_instr: [sabdl2] safety: safe types: - [int8x16_t, int16x8_t, int8x8_t, uint8x8_t] @@ -230,7 +230,7 @@ intrinsics: - stable - - 'feature = "neon_intrinsics"' - 'since = "1.59.0"' - assert_instr: [sabdl] + assert_instr: [sabdl2] safety: safe types: - [int16x8_t, int32x4_t, int16x4_t, uint16x4_t] @@ -273,7 +273,7 @@ intrinsics: - stable - - 'feature = "neon_intrinsics"' - 'since = "1.59.0"' - assert_instr: [sabdl] + assert_instr: [sabdl2] safety: safe types: - [int32x4_t, int64x2_t, int32x2_t, uint32x2_t] @@ -1462,7 +1462,7 @@ intrinsics: arguments: ["a: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtl]]}]] + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtl2]]}]] - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] safety: safe types: @@ -1530,7 +1530,7 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] return_type: "{neon_type[2]}" attr: - - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtn]]}]] + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtn2]]}]] - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] safety: safe types: @@ -1582,7 +1582,7 @@ intrinsics: arguments: ["a: {type[0]}", "b: {neon_type[1]}"] return_type: "{type[2]}" 
attr: - - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtxn]]}]] + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtxn2]]}]] - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] safety: safe types: @@ -5147,7 +5147,7 @@ intrinsics: attr: - *neon-stable safety: safe - assert_instr: [pmull] + assert_instr: [pmull2] types: - [poly8x16_t, poly8x8_t, '[8, 9, 10, 11, 12, 13, 14, 15]', poly16x8_t] compose: @@ -5169,7 +5169,7 @@ intrinsics: - *neon-aes - *neon-stable safety: safe - assert_instr: [pmull] + assert_instr: [pmull2] types: - [poly64x2_t, "p128"] compose: @@ -5741,7 +5741,7 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] return_type: "{neon_type[0]}" attr: [*neon-stable] - assert_instr: [ssubw] + assert_instr: [ssubw2] safety: safe types: - [int16x8_t, int8x16_t, int8x8_t, '[8, 9, 10, 11, 12, 13, 14, 15]'] @@ -5762,7 +5762,7 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[1]}"] return_type: "{neon_type[0]}" attr: [*neon-stable] - assert_instr: [usubw] + assert_instr: [usubw2] safety: safe types: - [uint16x8_t, uint8x16_t, uint8x8_t, '[8, 9, 10, 11, 12, 13, 14, 15]'] @@ -5783,7 +5783,7 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: [*neon-stable] - assert_instr: [ssubl] + assert_instr: [ssubl2] safety: safe types: - [int8x16_t, int16x8_t, '[8, 9, 10, 11, 12, 13, 14, 15]', int8x8_t] @@ -5813,7 +5813,7 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: [*neon-stable] - assert_instr: [usubl] + assert_instr: [usubl2] safety: safe types: - [uint8x16_t, uint16x8_t, '[8, 9, 10, 11, 12, 13, 14, 15]', uint8x8_t] @@ -6580,7 +6580,6 @@ intrinsics: arch: aarch64,arm64ec - - name: "vmaxnm{neon_type.no}" doc: Floating-point Maximum Number (vector) arguments: ["a: {neon_type}", "b: {neon_type}"] @@ -6592,11 +6591,7 @@ intrinsics: - float64x1_t - float64x2_t compose: - - LLVMLink: - name: "fmaxnm.{neon_type}" - links: - - link: "llvm.aarch64.neon.fmaxnm.{neon_type}" - arch: aarch64,arm64ec + - FnCall: [simd_fmax, [a, b]] - name: "vmaxnmh_{type}" @@ -6611,11 +6606,7 @@ intrinsics: types: - f16 compose: - - LLVMLink: - name: "vmaxh.{neon_type}" - links: - - link: "llvm.aarch64.neon.fmaxnm.{type}" - arch: aarch64,arm64ec + - FnCall: ["f16::max", [a, b]] - name: "vminnmh_{type}" @@ -6630,11 +6621,7 @@ intrinsics: types: - f16 compose: - - LLVMLink: - name: "vminh.{neon_type}" - links: - - link: "llvm.aarch64.neon.fminnm.{type}" - arch: aarch64,arm64ec + - FnCall: ["f16::min", [a, b]] - name: "vmaxnmv{neon_type[0].no}" @@ -6648,11 +6635,7 @@ intrinsics: - [float32x2_t, f32] - [float64x2_t, f64] compose: - - LLVMLink: - name: "fmaxnmv.{neon_type[0]}" - links: - - link: "llvm.aarch64.neon.fmaxnmv.{type[1]}.{neon_type[0]}" - arch: aarch64,arm64ec + - FnCall: [simd_reduce_max, [a]] - name: "vmaxnmv{neon_type[0].no}" doc: Floating-point maximum number across vector @@ -6664,11 +6647,7 @@ intrinsics: types: - [float32x4_t, f32] compose: - - LLVMLink: - name: "fmaxnmv.{neon_type[0]}" - links: - - link: "llvm.aarch64.neon.fmaxnmv.{type[1]}.{neon_type[0]}" - arch: aarch64,arm64ec + - FnCall: [simd_reduce_max, [a]] - name: "vmaxnmv{neon_type[0].no}" @@ -6684,11 +6663,7 @@ intrinsics: - [float16x4_t, f16] - [float16x8_t, f16] compose: - - LLVMLink: - name: "fmaxnmv.{neon_type[0]}" - links: - - link: "llvm.aarch64.neon.fmaxnmv.{type[1]}.{neon_type[0]}" - arch: aarch64,arm64ec + - FnCall: [simd_reduce_max, [a]] - name: 
"vminnmv{neon_type[0].no}" @@ -6704,11 +6679,7 @@ intrinsics: - [float16x4_t, f16] - [float16x8_t, f16] compose: - - LLVMLink: - name: "fminnmv.{neon_type[0]}" - links: - - link: "llvm.aarch64.neon.fminnmv.{type[1]}.{neon_type[0]}" - arch: aarch64,arm64ec + - FnCall: [simd_reduce_min, [a]] - name: "vmaxv{neon_type[0].no}" @@ -6814,11 +6785,7 @@ intrinsics: - float64x1_t - float64x2_t compose: - - LLVMLink: - name: "fminnm.{neon_type}" - links: - - link: "llvm.aarch64.neon.fminnm.{neon_type}" - arch: aarch64,arm64ec + - FnCall: [simd_fmin, [a, b]] - name: "vminnmv{neon_type[0].no}" doc: "Floating-point minimum number across vector" @@ -6832,11 +6799,7 @@ intrinsics: - [float32x2_t, "f32"] - [float64x2_t, "f64"] compose: - - LLVMLink: - name: "vminnmv.{neon_type[0]}" - links: - - link: "llvm.aarch64.neon.fminnmv.{type[1]}.{neon_type[0]}" - arch: aarch64,arm64ec + - FnCall: [simd_reduce_min, [a]] - name: "vminnmv{neon_type[0].no}" doc: "Floating-point minimum number across vector" @@ -6849,11 +6812,7 @@ intrinsics: types: - [float32x4_t, "f32"] compose: - - LLVMLink: - name: "vminnmv.{neon_type[0]}" - links: - - link: "llvm.aarch64.neon.fminnmv.{type[1]}.{neon_type[0]}" - arch: aarch64,arm64ec + - FnCall: [simd_reduce_min, [a]] - name: "vmovl_high{neon_type[0].noq}" doc: Vector move @@ -9950,7 +9909,7 @@ intrinsics: return_type: "{neon_type[0]}" attr: - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [not, ['target_env = "msvc"']]}]]}, {FnCall: [assert_instr, [uabal]]}]] + - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [not, ['target_env = "msvc"']]}]]}, {FnCall: [assert_instr, [uabal2]]}]] safety: safe types: - [uint16x8_t, uint8x16_t, uint8x8_t, '[8, 9, 10, 11, 12, 13, 14, 15]', '[8, 9, 10, 11, 12, 13, 14, 15]'] @@ -9977,7 +9936,7 @@ intrinsics: return_type: "{neon_type[0]}" attr: - *neon-stable - - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [not, ['target_env = "msvc"']]}]]}, {FnCall: [assert_instr, [sabal]]}]] + - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [not, ['target_env = "msvc"']]}]]}, {FnCall: [assert_instr, [sabal2]]}]] safety: safe types: - [int16x8_t, int8x16_t, int8x16_t, '[8, 9, 10, 11, 12, 13, 14, 15]', int8x8_t, uint8x8_t] @@ -11386,7 +11345,7 @@ intrinsics: arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[1]}" attr: - - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [uabdl]]}]] + - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [uabdl2]]}]] - FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']] safety: safe types: @@ -13023,6 +12982,26 @@ intrinsics: - link: "llvm.aarch64.crc32cx" arch: aarch64,arm64ec + - name: "vabsd_s64" + doc: "Absolute Value (wrapping)." + arguments: ["a: {type[1]}"] + return_type: "{type[1]}" + attr: + - *neon-stable + assert_instr: [abs] + safety: safe + types: + - [i64, i64] + compose: + # This is behaviorally equivalent to `i64::wrapping_abs`, but keeps the value in a SIMD + # register. That can be beneficial when combined with other instructions. This LLVM + # issue provides some extra context https://github.com/llvm/llvm-project/issues/148388. + - LLVMLink: + name: "vabsd_s64" + links: + - link: "llvm.aarch64.neon.abs.i64" + arch: aarch64,arm64ec + - name: "{type[0]}" doc: "Absolute Value (wrapping)." 
arguments: ["a: {type[1]}"] @@ -13032,15 +13011,18 @@ intrinsics: assert_instr: [abs] safety: safe types: - - ['vabsd_s64', i64, i64] - ['vabs_s64', int64x1_t, v1i64] - ['vabsq_s64', int64x2_t, v2i64] compose: - - LLVMLink: - name: "{type[0]}" - links: - - link: "llvm.aarch64.neon.abs.{type[2]}" - arch: aarch64,arm64ec + - Let: + - neg + - "{type[1]}" + - FnCall: [simd_neg, [a]] + - Let: + - mask + - "{type[1]}" + - FnCall: [simd_ge, [a, neg]] + - FnCall: [simd_select, [mask, a, neg]] - name: "vuqadd{neon_type[0].no}" doc: "Signed saturating Accumulate of Unsigned value." @@ -13142,11 +13124,7 @@ intrinsics: types: - [int64x2_t, i64] compose: - - FnCall: - - transmute - - - FnCall: - - "vaddvq_u64" - - - FnCall: [transmute, [a]] + - FnCall: [simd_reduce_add_unordered, [a]] - name: "vpaddd_u64" doc: "Add pairwise" @@ -13159,7 +13137,7 @@ intrinsics: types: - [uint64x2_t, u64] compose: - - FnCall: [vaddvq_u64, [a]] + - FnCall: [simd_reduce_add_unordered, [a]] - name: "vaddv{neon_type[0].no}" doc: "Add across vector" @@ -13176,11 +13154,7 @@ intrinsics: - [int16x8_t, i16] - [int32x4_t, i32] compose: - - LLVMLink: - name: "vaddv{neon_type[0].no}" - links: - - link: "llvm.aarch64.neon.saddv.{type[1]}.{neon_type[0]}" - arch: aarch64,arm64ec + - FnCall: [simd_reduce_add_unordered, [a]] - name: "vaddv{neon_type[0].no}" doc: "Add across vector" @@ -13193,11 +13167,7 @@ intrinsics: types: - [int32x2_t, i32] compose: - - LLVMLink: - name: "vaddv{neon_type[0].no}" - links: - - link: "llvm.aarch64.neon.saddv.i32.{neon_type[0]}" - arch: aarch64,arm64ec + - FnCall: [simd_reduce_add_unordered, [a]] - name: "vaddv{neon_type[0].no}" doc: "Add across vector" @@ -13210,11 +13180,7 @@ intrinsics: types: - [int64x2_t, i64] compose: - - LLVMLink: - name: "vaddv{neon_type[0].no}" - links: - - link: "llvm.aarch64.neon.saddv.i64.{neon_type[0]}" - arch: aarch64,arm64ec + - FnCall: [simd_reduce_add_unordered, [a]] - name: "vaddv{neon_type[0].no}" doc: "Add across vector" @@ -13231,11 +13197,7 @@ intrinsics: - [uint16x8_t, u16] - [uint32x4_t, u32] compose: - - LLVMLink: - name: "vaddv{neon_type[0].no}" - links: - - link: "llvm.aarch64.neon.uaddv.{type[1]}.{neon_type[0]}" - arch: aarch64,arm64ec + - FnCall: [simd_reduce_add_unordered, [a]] - name: "vaddv{neon_type[0].no}" doc: "Add across vector" @@ -13248,11 +13210,7 @@ intrinsics: types: - [uint32x2_t, u32, i32] compose: - - LLVMLink: - name: "vaddv{neon_type[0].no}" - links: - - link: "llvm.aarch64.neon.uaddv.{type[2]}.{neon_type[0]}" - arch: aarch64,arm64ec + - FnCall: [simd_reduce_add_unordered, [a]] - name: "vaddv{neon_type[0].no}" doc: "Add across vector" @@ -13265,11 +13223,7 @@ intrinsics: types: - [uint64x2_t, u64, i64] compose: - - LLVMLink: - name: "vaddv{neon_type[0].no}" - links: - - link: "llvm.aarch64.neon.uaddv.{type[2]}.{neon_type[0]}" - arch: aarch64,arm64ec + - FnCall: [simd_reduce_add_unordered, [a]] - name: "vaddlv{neon_type[0].no}" doc: "Signed Add Long across Vector" @@ -13327,11 +13281,7 @@ intrinsics: - [int16x8_t, i16, 'smaxv'] - [int32x4_t, i32, 'smaxv'] compose: - - LLVMLink: - name: "vmaxv{neon_type[0].no}" - links: - - link: "llvm.aarch64.neon.smaxv.{type[1]}.{neon_type[0]}" - arch: aarch64,arm64ec + - FnCall: [simd_reduce_max, [a]] - name: "vmaxv{neon_type[0].no}" doc: "Horizontal vector max." 
@@ -13349,11 +13299,7 @@ intrinsics: - [uint16x8_t, u16, 'umaxv'] - [uint32x4_t, u32, 'umaxv'] compose: - - LLVMLink: - name: "vmaxv{neon_type[0].no}" - links: - - link: "llvm.aarch64.neon.umaxv.{type[1]}.{neon_type[0]}" - arch: aarch64,arm64ec + - FnCall: [simd_reduce_max, [a]] - name: "vmaxv{neon_type[0].no}" doc: "Horizontal vector max." @@ -13390,11 +13336,7 @@ intrinsics: - [int16x8_t, i16, 'sminv'] - [int32x4_t, i32, 'sminv'] compose: - - LLVMLink: - name: "vminv{neon_type[0].no}" - links: - - link: "llvm.aarch64.neon.sminv.{type[1]}.{neon_type[0]}" - arch: aarch64,arm64ec + - FnCall: [simd_reduce_min, [a]] - name: "vminv{neon_type[0].no}" doc: "Horizontal vector min." @@ -13412,11 +13354,7 @@ intrinsics: - [uint16x8_t, u16, 'uminv'] - [uint32x4_t, u32, 'uminv'] compose: - - LLVMLink: - name: "vminv{neon_type[0].no}" - links: - - link: "llvm.aarch64.neon.uminv.{type[1]}.{neon_type[0]}" - arch: aarch64,arm64ec + - FnCall: [simd_reduce_min, [a]] - name: "vminv{neon_type[0].no}" doc: "Horizontal vector min." diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml b/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml index 07959cf380e8a..c96c6e2a0c0b5 100644 --- a/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml +++ b/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml @@ -7135,13 +7135,8 @@ intrinsics: - int32x2_t - int32x4_t compose: - - LLVMLink: - name: "smax.{neon_type}" - links: - - link: "llvm.arm.neon.vmaxs.{neon_type}" - arch: arm - - link: "llvm.aarch64.neon.smax.{neon_type}" - arch: aarch64,arm64ec + - Let: [mask, "{neon_type}", {FnCall: [simd_ge, [a, b]]}] + - FnCall: [simd_select, [mask, a, b]] - name: "vmax{neon_type.no}" doc: Maximum (vector) @@ -7162,13 +7157,8 @@ intrinsics: - uint32x2_t - uint32x4_t compose: - - LLVMLink: - name: "smax.{neon_type}" - links: - - link: "llvm.arm.neon.vmaxu.{neon_type}" - arch: arm - - link: "llvm.aarch64.neon.umax.{neon_type}" - arch: aarch64,arm64ec + - Let: [mask, "{neon_type}", {FnCall: [simd_ge, [a, b]]}] + - FnCall: [simd_select, [mask, a, b]] - name: "vmax{neon_type.no}" doc: Maximum (vector) @@ -7233,13 +7223,7 @@ intrinsics: - float32x2_t - float32x4_t compose: - - LLVMLink: - name: "fmaxnm.{neon_type}" - links: - - link: "llvm.arm.neon.vmaxnm.{neon_type}" - arch: arm - - link: "llvm.aarch64.neon.fmaxnm.{neon_type}" - arch: aarch64,arm64ec + - FnCall: [simd_fmax, [a, b]] - name: "vmaxnm{neon_type.no}" @@ -7257,13 +7241,7 @@ intrinsics: - float16x4_t - float16x8_t compose: - - LLVMLink: - name: "fmaxnm.{neon_type}" - links: - - link: "llvm.arm.neon.vmaxnm.{neon_type}" - arch: arm - - link: "llvm.aarch64.neon.fmaxnm.{neon_type}" - arch: aarch64,arm64ec + - FnCall: [simd_fmax, [a, b]] - name: "vminnm{neon_type.no}" @@ -7281,13 +7259,7 @@ intrinsics: - float16x4_t - float16x8_t compose: - - LLVMLink: - name: "fminnm.{neon_type}" - links: - - link: "llvm.arm.neon.vminnm.{neon_type}" - arch: arm - - link: "llvm.aarch64.neon.fminnm.{neon_type}" - arch: aarch64,arm64ec + - FnCall: [simd_fmin, [a, b]] - name: "vmin{neon_type.no}" @@ -7309,13 +7281,8 @@ intrinsics: - int32x2_t - int32x4_t compose: - - LLVMLink: - name: "smin.{neon_type}" - links: - - link: "llvm.arm.neon.vmins.{neon_type}" - arch: arm - - link: "llvm.aarch64.neon.smin.{neon_type}" - arch: aarch64,arm64ec + - Let: [mask, "{neon_type}", {FnCall: [simd_le, [a, b]]}] + - FnCall: [simd_select, [mask, a, b]] - name: "vmin{neon_type.no}" doc: "Minimum (vector)" @@ -7336,13 +7303,8 @@ intrinsics: - 
uint32x2_t - uint32x4_t compose: - - LLVMLink: - name: "umin.{neon_type}" - links: - - link: "llvm.arm.neon.vminu.{neon_type}" - arch: arm - - link: "llvm.aarch64.neon.umin.{neon_type}" - arch: aarch64,arm64ec + - Let: [mask, "{neon_type}", {FnCall: [simd_le, [a, b]]}] + - FnCall: [simd_select, [mask, a, b]] - name: "vmin{neon_type.no}" doc: "Minimum (vector)" @@ -7408,13 +7370,7 @@ intrinsics: - float32x2_t - float32x4_t compose: - - LLVMLink: - name: "fminnm.{neon_type}" - links: - - link: "llvm.arm.neon.vminnm.{neon_type}" - arch: arm - - link: "llvm.aarch64.neon.fminnm.{neon_type}" - arch: aarch64,arm64ec + - FnCall: [simd_fmin, [a, b]] - name: "vpadd{neon_type.no}" doc: Floating-point add pairwise @@ -7874,9 +7830,9 @@ intrinsics: static_defs: ['const N: i32'] safety: safe types: - - [int16x8_t, int8x8_t, 'N >= 1 && N <= 8', 'const { int16x8_t([-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16]) }'] - - [int32x4_t, int16x4_t, 'N >= 1 && N <= 16', 'const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }'] - - [int64x2_t, int32x2_t, 'N >= 1 && N <= 32', 'const { int64x2_t([-N as i64, -N as i64]) }'] + - [int16x8_t, int8x8_t, 'N >= 1 && N <= 8', 'const { int16x8_t([-N as i16; 8]) }'] + - [int32x4_t, int16x4_t, 'N >= 1 && N <= 16', 'const { int32x4_t([-N; 4]) }'] + - [int64x2_t, int32x2_t, 'N >= 1 && N <= 32', 'const { int64x2_t([-N as i64; 2]) }'] compose: - FnCall: [static_assert!, ["{type[2]}"]] - LLVMLink: @@ -7929,9 +7885,9 @@ intrinsics: static_defs: ['const N: i32'] safety: safe types: - - [int16x8_t, uint8x8_t, 'N >= 1 && N <= 8', 'const { int16x8_t([-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16]) }'] - - [int32x4_t, uint16x4_t, 'N >= 1 && N <= 16', 'const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }'] - - [int64x2_t, uint32x2_t, 'N >= 1 && N <= 32', 'const { int64x2_t([-N as i64, -N as i64]) }'] + - [int16x8_t, uint8x8_t, 'N >= 1 && N <= 8', 'const { int16x8_t([-N as i16; 8]) }'] + - [int32x4_t, uint16x4_t, 'N >= 1 && N <= 16', 'const { int32x4_t([-N; 4]) }'] + - [int64x2_t, uint32x2_t, 'N >= 1 && N <= 32', 'const { int64x2_t([-N as i64; 2]) }'] compose: - FnCall: [static_assert!, ["{type[2]}"]] - LLVMLink: @@ -8105,9 +8061,9 @@ intrinsics: static_defs: ['const N: i32'] safety: safe types: - - [int16x8_t, int8x8_t, 'N >= 1 && N <= 8', 'const { int16x8_t([-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16]) }'] - - [int32x4_t, int16x4_t, 'N >= 1 && N <= 16', 'const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }'] - - [int64x2_t, int32x2_t, 'N >= 1 && N <= 32', 'const { int64x2_t([-N as i64, -N as i64]) }'] + - [int16x8_t, int8x8_t, 'N >= 1 && N <= 8', 'const { int16x8_t([-N as i16; 8]) }'] + - [int32x4_t, int16x4_t, 'N >= 1 && N <= 16', 'const { int32x4_t([-N; 4]) }'] + - [int64x2_t, int32x2_t, 'N >= 1 && N <= 32', 'const { int64x2_t([-N as i64; 2]) }'] compose: - FnCall: [static_assert!, ["{type[2]}"]] - LLVMLink: @@ -8215,9 +8171,9 @@ intrinsics: static_defs: ['const N: i32'] safety: safe types: - - [int16x8_t, uint8x8_t, 'N >= 1 && N <= 8', 'const { int16x8_t([-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16]) }'] - - [int32x4_t, uint16x4_t, 'N >= 1 && N <= 16', 'const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }'] - - [int64x2_t, uint32x2_t, 'N >= 1 && N <= 32', 'const { int64x2_t([-N as i64, -N as i64]) }'] + - [int16x8_t, uint8x8_t, 'N >= 1 && N <= 8', 'const { 
int16x8_t([-N as i16; 8]) }'] + - [int32x4_t, uint16x4_t, 'N >= 1 && N <= 16', 'const { int32x4_t([-N; 4]) }'] + - [int64x2_t, uint32x2_t, 'N >= 1 && N <= 32', 'const { int64x2_t([-N as i64; 2]) }'] compose: - FnCall: [static_assert!, ["{type[2]}"]] - LLVMLink: @@ -8939,9 +8895,9 @@ intrinsics: static_defs: ['const N: i32'] safety: safe types: - - [int16x8_t, int8x8_t, 'N >= 1 && N <= 8', 'const { int16x8_t([-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16]) }'] - - [int32x4_t, int16x4_t, 'N >= 1 && N <= 16', 'const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }'] - - [int64x2_t, int32x2_t, 'N >= 1 && N <= 32', 'const { int64x2_t([-N as i64, -N as i64]) }'] + - [int16x8_t, int8x8_t, 'N >= 1 && N <= 8', 'const { int16x8_t([-N as i16; 8]) }'] + - [int32x4_t, int16x4_t, 'N >= 1 && N <= 16', 'const { int32x4_t([-N; 4]) }'] + - [int64x2_t, int32x2_t, 'N >= 1 && N <= 32', 'const { int64x2_t([-N as i64; 2]) }'] compose: - FnCall: [static_assert!, ["{type[2]}"]] - LLVMLink: @@ -9576,7 +9532,8 @@ intrinsics: attr: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vtrn]]}]] - - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [trn]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [trn1]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [trn2]]}]] - *neon-not-arm-stable - *neon-cfg-arm-unstable safety: safe @@ -9617,7 +9574,8 @@ intrinsics: attr: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vtrn]]}]] - - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [trn]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [trn1]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [trn2]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -9645,7 +9603,8 @@ intrinsics: attr: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vtrn]]}]] - - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [zip]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [zip1]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [zip2]]}]] - *neon-not-arm-stable - *neon-cfg-arm-unstable safety: safe @@ -9673,7 +9632,8 @@ intrinsics: attr: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vorr]]}]] - - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [zip]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [zip1]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [zip2]]}]] - *neon-not-arm-stable - *neon-cfg-arm-unstable safety: safe @@ -9707,7 +9667,8 @@ intrinsics: attr: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vtrn]]}]] - - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [zip]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [zip1]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [zip2]]}]] - *neon-not-arm-stable - *neon-cfg-arm-unstable safety: safe @@ -9735,7 +9696,8 @@ intrinsics: attr: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vzip]]}]] - - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [zip]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, 
[zip1]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [zip2]]}]] - *neon-not-arm-stable - *neon-cfg-arm-unstable safety: safe @@ -9767,7 +9729,8 @@ intrinsics: attr: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vzip.16"']]}]] - - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [zip]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [zip1]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [zip2]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -9794,7 +9757,8 @@ intrinsics: attr: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vuzp]]}]] - - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [uzp]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [uzp1]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [uzp2]]}]] - *neon-not-arm-stable - *neon-cfg-arm-unstable safety: safe @@ -9835,7 +9799,8 @@ intrinsics: attr: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vuzp]]}]] - - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [uzp]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [uzp1]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [uzp2]]}]] - *neon-fp16 - *neon-unstable-f16 safety: safe @@ -9863,7 +9828,8 @@ intrinsics: attr: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vtrn]]}]] - - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [zip]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [zip1]]}]] + - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [zip2]]}]] - *neon-not-arm-stable - *neon-cfg-arm-unstable safety: safe @@ -12881,13 +12847,16 @@ intrinsics: - int16x8_t - int32x4_t compose: - - LLVMLink: - name: "vabs{neon_type.no}" - links: - - link: "llvm.aarch64.neon.abs.{neon_type}" - arch: aarch64,arm64ec - - link: "llvm.arm.neon.vabs.{neon_type}" - arch: arm + - Let: + - neg + - "{neon_type}" + - FnCall: [simd_neg, [a]] + - Let: + - mask + - "{neon_type}" + - FnCall: [simd_ge, [a, neg]] + - FnCall: [simd_select, [mask, a, neg]] + - name: "vpmin{neon_type.no}" doc: "Folding minimum of adjacent pairs" @@ -13862,8 +13831,8 @@ intrinsics: - [int8x16_t, '8', '1 <= N && N <= 8', 'v16i8', 'int8x16_t::splat', '-N as i8'] - [int16x4_t, '16', '1 <= N && N <= 16', 'v4i16', 'int16x4_t::splat', '-N as i16'] - [int16x8_t, '16', '1 <= N && N <= 16', 'v8i16', 'int16x8_t::splat', '-N as i16'] - - [int32x2_t, '32', '1 <= N && N <= 32', 'v2i32', 'int32x2_t::splat', '-N as i32'] - - [int32x4_t, '32', '1 <= N && N <= 32', 'v4i32', 'int32x4_t::splat', '-N as i32'] + - [int32x2_t, '32', '1 <= N && N <= 32', 'v2i32', 'int32x2_t::splat', '-N'] + - [int32x4_t, '32', '1 <= N && N <= 32', 'v4i32', 'int32x4_t::splat', '-N'] - [int64x1_t, '64', '1 <= N && N <= 64', 'v1i64', 'int64x1_t::splat', '-N as i64'] - [int64x2_t, '64', '1 <= N && N <= 64', 'v2i64', 'int64x2_t::splat', '-N as i64'] compose: @@ -13891,8 +13860,8 @@ intrinsics: - [uint8x16_t, "neon,v7", '8', 'static_assert_uimm_bits!', 'N, 3', 'v16i8', 'int8x16_t::splat', 'N as i8'] - [uint16x4_t, "neon,v7", '16', 'static_assert_uimm_bits!', 'N, 4', 'v4i16', 'int16x4_t::splat', 'N as i16'] - [uint16x8_t, "neon,v7", '16', 'static_assert_uimm_bits!', 'N, 4', 'v8i16', 
'int16x8_t::splat', 'N as i16'] - - [uint32x2_t, "neon,v7", '32', 'static_assert!', 'N >= 0 && N <= 31', 'v2i32', 'int32x2_t::splat', 'N as i32'] - - [uint32x4_t, "neon,v7", '32', 'static_assert!', 'N >= 0 && N <= 31', 'v4i32', 'int32x4_t::splat', 'N as i32'] + - [uint32x2_t, "neon,v7", '32', 'static_assert!', 'N >= 0 && N <= 31', 'v2i32', 'int32x2_t::splat', 'N'] + - [uint32x4_t, "neon,v7", '32', 'static_assert!', 'N >= 0 && N <= 31', 'v4i32', 'int32x4_t::splat', 'N'] - [uint64x1_t, "neon,v7", '64', 'static_assert!', 'N >= 0 && N <= 63', 'v1i64', 'int64x1_t::splat', 'N as i64'] - [uint64x2_t, "neon,v7", '64', 'static_assert!', 'N >= 0 && N <= 63', 'v2i64', 'int64x2_t::splat', 'N as i64'] - [poly8x8_t, "neon,v7", '8', 'static_assert_uimm_bits!', 'N, 3', 'v8i8', 'int8x8_t::splat', 'N as i8'] @@ -14138,6 +14107,7 @@ intrinsics: doc: "Load one single-element structure and Replicate to all lanes (of one register)." arguments: ["ptr: {type[1]}"] return_type: "{neon_type[2]}" + big_endian_inverse: false attr: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ['"{type[3]}"']] } ]] @@ -14147,40 +14117,36 @@ intrinsics: safety: unsafe: [neon] types: - - ['vld1_dup_s8', '*const i8', 'int8x8_t', 'vld1.8', 'ld1r', 'vld1_lane_s8::<0>', 'i8x8::splat(0)', '[0, 0, 0, 0, 0, 0, 0, 0]'] - - ['vld1_dup_u8', '*const u8', 'uint8x8_t', 'vld1.8', 'ld1r', 'vld1_lane_u8::<0>', 'u8x8::splat(0)', '[0, 0, 0, 0, 0, 0, 0, 0]'] - - ['vld1_dup_p8', '*const p8', 'poly8x8_t', 'vld1.8', 'ld1r', 'vld1_lane_p8::<0>', 'u8x8::splat(0)', '[0, 0, 0, 0, 0, 0, 0, 0]'] + - ['vld1_dup_s8', '*const i8', 'int8x8_t', 'vld1.8', 'ld1r', 'i8x8::splat'] + - ['vld1_dup_u8', '*const u8', 'uint8x8_t', 'vld1.8', 'ld1r', 'u8x8::splat'] + - ['vld1_dup_p8', '*const p8', 'poly8x8_t', 'vld1.8', 'ld1r', 'u8x8::splat'] - - ['vld1q_dup_s8', '*const i8', 'int8x16_t', 'vld1.8', 'ld1r', 'vld1q_lane_s8::<0>', 'i8x16::splat(0)', '[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'] - - ['vld1q_dup_u8', '*const u8', 'uint8x16_t', 'vld1.8', 'ld1r', 'vld1q_lane_u8::<0>', 'u8x16::splat(0)', '[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'] - - ['vld1q_dup_p8', '*const p8', 'poly8x16_t', 'vld1.8', 'ld1r', 'vld1q_lane_p8::<0>', 'u8x16::splat(0)', '[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'] + - ['vld1q_dup_s8', '*const i8', 'int8x16_t', 'vld1.8', 'ld1r', 'i8x16::splat'] + - ['vld1q_dup_u8', '*const u8', 'uint8x16_t', 'vld1.8', 'ld1r', 'u8x16::splat'] + - ['vld1q_dup_p8', '*const p8', 'poly8x16_t', 'vld1.8', 'ld1r', 'u8x16::splat'] - - ['vld1_dup_s16', '*const i16', 'int16x4_t', 'vld1.16', 'ld1r', 'vld1_lane_s16::<0>', 'i16x4::splat(0)', '[0, 0, 0, 0]'] - - ['vld1_dup_u16', '*const u16', 'uint16x4_t', 'vld1.16', 'ld1r', 'vld1_lane_u16::<0>', 'u16x4::splat(0)', '[0, 0, 0, 0]'] - - ['vld1_dup_p16', '*const p16', 'poly16x4_t', 'vld1.16', 'ld1r', 'vld1_lane_p16::<0>', 'u16x4::splat(0)', '[0, 0, 0, 0]'] + - ['vld1_dup_s16', '*const i16', 'int16x4_t', 'vld1.16', 'ld1r', 'i16x4::splat'] + - ['vld1_dup_u16', '*const u16', 'uint16x4_t', 'vld1.16', 'ld1r', 'u16x4::splat'] + - ['vld1_dup_p16', '*const p16', 'poly16x4_t', 'vld1.16', 'ld1r', 'u16x4::splat'] - - ['vld1q_dup_s16', '*const i16', 'int16x8_t', 'vld1.16', 'ld1r', 'vld1q_lane_s16::<0>', 'i16x8::splat(0)', '[0, 0, 0, 0, 0, 0, 0, 0]'] - - ['vld1q_dup_u16', '*const u16', 'uint16x8_t', 'vld1.16', 'ld1r', 'vld1q_lane_u16::<0>', 'u16x8::splat(0)', '[0, 0, 0, 0, 0, 0, 0, 0]'] - - ['vld1q_dup_p16', '*const p16', 'poly16x8_t', 'vld1.16', 'ld1r', 'vld1q_lane_p16::<0>', 'u16x8::splat(0)', '[0, 0, 0, 
0, 0, 0, 0, 0]'] + - ['vld1q_dup_s16', '*const i16', 'int16x8_t', 'vld1.16', 'ld1r', 'i16x8::splat'] + - ['vld1q_dup_u16', '*const u16', 'uint16x8_t', 'vld1.16', 'ld1r', 'u16x8::splat'] + - ['vld1q_dup_p16', '*const p16', 'poly16x8_t', 'vld1.16', 'ld1r', 'u16x8::splat'] - - ['vld1_dup_s32', '*const i32', 'int32x2_t', 'vld1.32', 'ld1r', 'vld1_lane_s32::<0>', 'i32x2::splat(0)', '[0, 0]'] - - ['vld1_dup_u32', '*const u32', 'uint32x2_t', 'vld1.32', 'ld1r', 'vld1_lane_u32::<0>', 'u32x2::splat(0)', '[0, 0]'] - - ['vld1_dup_f32', '*const f32', 'float32x2_t', 'vld1.32', 'ld1r', 'vld1_lane_f32::<0>', 'f32x2::splat(0.0)', '[0, 0]'] + - ['vld1_dup_s32', '*const i32', 'int32x2_t', 'vld1.32', 'ld1r', 'i32x2::splat'] + - ['vld1_dup_u32', '*const u32', 'uint32x2_t', 'vld1.32', 'ld1r', 'u32x2::splat'] + - ['vld1_dup_f32', '*const f32', 'float32x2_t', 'vld1.32', 'ld1r', 'f32x2::splat'] - - ['vld1q_dup_s32', '*const i32', 'int32x4_t', 'vld1.32', 'ld1r', 'vld1q_lane_s32::<0>', 'i32x4::splat(0)', '[0, 0, 0, 0]'] - - ['vld1q_dup_u32', '*const u32', 'uint32x4_t', 'vld1.32', 'ld1r', 'vld1q_lane_u32::<0>', 'u32x4::splat(0)', '[0, 0, 0, 0]'] - - ['vld1q_dup_f32', '*const f32', 'float32x4_t', 'vld1.32', 'ld1r', 'vld1q_lane_f32::<0>', 'f32x4::splat(0.0)', '[0, 0, 0, 0]'] + - ['vld1q_dup_s32', '*const i32', 'int32x4_t', 'vld1.32', 'ld1r', 'i32x4::splat'] + - ['vld1q_dup_u32', '*const u32', 'uint32x4_t', 'vld1.32', 'ld1r', 'u32x4::splat'] + - ['vld1q_dup_f32', '*const f32', 'float32x4_t', 'vld1.32', 'ld1r', 'f32x4::splat'] - - ['vld1q_dup_s64', '*const i64', 'int64x2_t', 'vldr', 'ld1', 'vld1q_lane_s64::<0>', 'i64x2::splat(0)', '[0, 0]'] - - ['vld1q_dup_u64', '*const u64', 'uint64x2_t', 'vldr', 'ld1', 'vld1q_lane_u64::<0>', 'u64x2::splat(0)', '[0, 0]'] + - ['vld1q_dup_s64', '*const i64', 'int64x2_t', 'vldr', 'ld1r', 'i64x2::splat'] + - ['vld1q_dup_u64', '*const u64', 'uint64x2_t', 'vldr', 'ld1r', 'u64x2::splat'] compose: - - Let: - - x - - FnCall: - - '{type[5]}' - - - ptr - - FnCall: [transmute, ['{type[6]}']] - - FnCall: ['simd_shuffle!', [x, x, '{type[7]}']] + - FnCall: + - transmute + - - FnCall: ['{type[5]}', ["*ptr"]] - name: "{type[0]}" doc: "Absolute difference and accumulate (64-bit)" diff --git a/library/stdarch/crates/stdarch-gen-arm/src/expression.rs b/library/stdarch/crates/stdarch-gen-arm/src/expression.rs index 56c94602fff94..d5644ef27d4b1 100644 --- a/library/stdarch/crates/stdarch-gen-arm/src/expression.rs +++ b/library/stdarch/crates/stdarch-gen-arm/src/expression.rs @@ -1,5 +1,4 @@ use itertools::Itertools; -use lazy_static::lazy_static; use proc_macro2::{Literal, Punct, Spacing, TokenStream}; use quote::{ToTokens, TokenStreamExt, format_ident, quote}; use regex::Regex; @@ -7,6 +6,7 @@ use serde::de::{self, MapAccess, Visitor}; use serde::{Deserialize, Deserializer, Serialize}; use std::fmt; use std::str::FromStr; +use std::sync::LazyLock; use crate::intrinsic::Intrinsic; use crate::wildstring::WildStringPart; @@ -374,10 +374,8 @@ impl FromStr for Expression { type Err = String; fn from_str(s: &str) -> Result { - lazy_static! 
{ - static ref MACRO_RE: Regex = - Regex::new(r"^(?P[\w\d_]+)!\((?P.*?)\);?$").unwrap(); - } + static MACRO_RE: LazyLock<Regex> = + LazyLock::new(|| Regex::new(r"^(?P[\w\d_]+)!\((?P.*?)\);?$").unwrap()); if s == "SvUndef" { Ok(Expression::SvUndef) diff --git a/library/stdarch/crates/stdarch-gen-arm/src/load_store_tests.rs b/library/stdarch/crates/stdarch-gen-arm/src/load_store_tests.rs index 5cf39b2e11aed..3f3bfed132c76 100644 --- a/library/stdarch/crates/stdarch-gen-arm/src/load_store_tests.rs +++ b/library/stdarch/crates/stdarch-gen-arm/src/load_store_tests.rs @@ -2,6 +2,7 @@ use std::fs::File; use std::io::Write; use std::path::PathBuf; use std::str::FromStr; +use std::sync::LazyLock; use crate::format_code; use crate::input::InputType; @@ -10,7 +11,6 @@ use crate::typekinds::BaseType; use crate::typekinds::{ToRepr, TypeKind}; use itertools::Itertools; -use lazy_static::lazy_static; use proc_macro2::TokenStream; use quote::{format_ident, quote}; @@ -639,8 +639,8 @@ impl LdIntrCharacteristics { } } -lazy_static! { - static ref PREAMBLE: String = format!( +static PREAMBLE: LazyLock<String> = LazyLock::new(|| { + format!( r#"#![allow(unused)] use super::*; @@ -801,13 +801,11 @@ fn assert_vector_matches_u64(vector: svuint64_t, expected: svuint64_t) {{ assert!(!svptest_any(defined, cmp)) }} "# - ); -} + ) +}); -lazy_static! { - static ref MANUAL_TESTS: String = format!( - "#[simd_test(enable = \"sve\")] -unsafe fn test_ffr() {{ +const MANUAL_TESTS: &str = "#[simd_test(enable = \"sve\")] +unsafe fn test_ffr() { svsetffr(); let ffr = svrdffr(); assert_vector_matches_u8(svdup_n_u8_z(ffr, 1), svindex_u8(1, 0)); @@ -816,7 +814,5 @@ unsafe fn test_ffr() {{ svwrffr(pred); let ffr = svrdffr_z(svptrue_b8()); assert_vector_matches_u8(svdup_n_u8_z(ffr, 1), svdup_n_u8_z(pred, 1)); -}} -" - ); } +"; diff --git a/library/stdarch/crates/stdarch-gen-arm/src/typekinds.rs b/library/stdarch/crates/stdarch-gen-arm/src/typekinds.rs index 7c697cb7c0c43..bd47ff2bd1557 100644 --- a/library/stdarch/crates/stdarch-gen-arm/src/typekinds.rs +++ b/library/stdarch/crates/stdarch-gen-arm/src/typekinds.rs @@ -1,10 +1,10 @@ -use lazy_static::lazy_static; use proc_macro2::TokenStream; use quote::{ToTokens, TokenStreamExt, quote}; use regex::Regex; use serde_with::{DeserializeFromStr, SerializeDisplay}; use std::fmt; use std::str::FromStr; +use std::sync::LazyLock; use crate::context; use crate::expression::{Expression, FnCall}; @@ -496,9 +496,9 @@ impl FromStr for VectorType { type Err = String; fn from_str(s: &str) -> Result<Self, Self::Err> { - lazy_static! { - static ref RE: Regex = Regex::new(r"^(?:(?:sv(?P(?:uint|int|bool|float)(?:\d+)?))|(?:(?P(?:uint|int|bool|poly|float)(?:\d+)?)x(?P(?:\d+)?)))(?:x(?P2|3|4))?_t$").unwrap(); - } + static RE: LazyLock<Regex> = LazyLock::new(|| { + Regex::new(r"^(?:(?:sv(?P(?:uint|int|bool|float)(?:\d+)?))|(?:(?P(?:uint|int|bool|poly|float)(?:\d+)?)x(?P(?:\d+)?)))(?:x(?P2|3|4))?_t$").unwrap() + }); if let Some(c) = RE.captures(s) { let (base_type, lanes) = Self::sanitise_lanes( @@ -698,9 +698,8 @@ impl FromStr for BaseType { type Err = String; fn from_str(s: &str) -> Result<Self, Self::Err> { - lazy_static!
{ - static ref RE: Regex = Regex::new(r"^(?P[a-zA-Z]+)(?P\d+)?(_t)?$").unwrap(); - } + static RE: LazyLock = + LazyLock::new(|| Regex::new(r"^(?P[a-zA-Z]+)(?P\d+)?(_t)?$").unwrap()); if let Some(c) = RE.captures(s) { let kind = c["kind"].parse()?; diff --git a/library/stdarch/crates/stdarch-gen-arm/src/wildcards.rs b/library/stdarch/crates/stdarch-gen-arm/src/wildcards.rs index 25aa803489270..6c40d88df45fd 100644 --- a/library/stdarch/crates/stdarch-gen-arm/src/wildcards.rs +++ b/library/stdarch/crates/stdarch-gen-arm/src/wildcards.rs @@ -1,8 +1,7 @@ -use lazy_static::lazy_static; use regex::Regex; use serde_with::{DeserializeFromStr, SerializeDisplay}; -use std::fmt; use std::str::FromStr; +use std::{fmt, sync::LazyLock}; use crate::{ fn_suffix::SuffixKind, @@ -66,9 +65,9 @@ impl FromStr for Wildcard { type Err = String; fn from_str(s: &str) -> Result { - lazy_static! { - static ref RE: Regex = Regex::new(r"^(?P\w+?)(?:_x(?P[2-4]))?(?:\[(?P\d+)\])?(?:\.(?P\w+))?(?:\s+as\s+(?P.*?))?$").unwrap(); - } + static RE: LazyLock = LazyLock::new(|| { + Regex::new(r"^(?P\w+?)(?:_x(?P[2-4]))?(?:\[(?P\d+)\])?(?:\.(?P\w+))?(?:\s+as\s+(?P.*?))?$").unwrap() + }); if let Some(c) = RE.captures(s) { let wildcard_name = &c["wildcard"]; diff --git a/library/stdarch/crates/stdarch-gen-loongarch/README.md b/library/stdarch/crates/stdarch-gen-loongarch/README.md index 1fc81483a12e7..2b3f00f34afff 100644 --- a/library/stdarch/crates/stdarch-gen-loongarch/README.md +++ b/library/stdarch/crates/stdarch-gen-loongarch/README.md @@ -11,7 +11,6 @@ LSX: # Generate bindings OUT_DIR=`pwd`/crates/stdarch-gen-loongarch cargo run -p stdarch-gen-loongarch -- crates/stdarch-gen-loongarch/lsxintrin.h OUT_DIR=`pwd`/crates/core_arch cargo run -p stdarch-gen-loongarch -- crates/stdarch-gen-loongarch/lsx.spec -rustfmt crates/core_arch/src/loongarch64/lsx/generated.rs # Generate tests OUT_DIR=`pwd`/crates/stdarch-gen-loongarch cargo run -p stdarch-gen-loongarch -- crates/stdarch-gen-loongarch/lsx.spec test @@ -25,7 +24,6 @@ LASX: # Generate bindings OUT_DIR=`pwd`/crates/stdarch-gen-loongarch cargo run -p stdarch-gen-loongarch -- crates/stdarch-gen-loongarch/lasxintrin.h OUT_DIR=`pwd`/crates/core_arch cargo run -p stdarch-gen-loongarch -- crates/stdarch-gen-loongarch/lasx.spec -rustfmt crates/core_arch/src/loongarch64/lasx/generated.rs # Generate tests OUT_DIR=`pwd`/crates/stdarch-gen-loongarch cargo run -p stdarch-gen-loongarch -- crates/stdarch-gen-loongarch/lasx.spec test diff --git a/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs b/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs index aa9990b6ccd13..40132097f5de7 100644 --- a/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs +++ b/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs @@ -274,13 +274,14 @@ fn gen_bind_body( } }; + let is_mem = in_t.iter().any(|s| s.contains("POINTER")); let is_store = current_name.to_string().contains("vst"); let link_function = { let fn_decl = { let fn_output = if out_t.to_lowercase() == "void" { String::new() } else { - format!("-> {}", type_to_rst(out_t, is_store)) + format!(" -> {}", type_to_rst(out_t, is_store)) }; let fn_inputs = match para_num { 1 => format!("(a: {})", type_to_rst(in_t[0], is_store)), @@ -304,7 +305,7 @@ fn gen_bind_body( ), _ => panic!("unsupported parameter number"), }; - format!("fn __{current_name}{fn_inputs} {fn_output};") + format!("fn __{current_name}{fn_inputs}{fn_output};") }; let function = format!( r#" #[link_name = "llvm.loongarch.{}"] @@ -456,31 +457,40 @@ fn gen_bind_body( }; 
rustc_legacy_const_generics = "rustc_legacy_const_generics(2, 3)"; } - format!("pub unsafe fn {current_name}{fn_inputs} {fn_output}") + format!( + "pub {}fn {current_name}{fn_inputs} {fn_output}", + if is_mem { "unsafe " } else { "" } + ) }; + let unsafe_start = if !is_mem { "unsafe { " } else { "" }; + let unsafe_end = if !is_mem { " }" } else { "" }; let mut call_params = { match para_num { - 1 => format!("__{current_name}(a)"), - 2 => format!("__{current_name}(a, b)"), - 3 => format!("__{current_name}(a, b, c)"), - 4 => format!("__{current_name}(a, b, c, d)"), + 1 => format!("{unsafe_start}__{current_name}(a){unsafe_end}"), + 2 => format!("{unsafe_start}__{current_name}(a, b){unsafe_end}"), + 3 => format!("{unsafe_start}__{current_name}(a, b, c){unsafe_end}"), + 4 => format!("{unsafe_start}__{current_name}(a, b, c, d){unsafe_end}"), _ => panic!("unsupported parameter number"), } }; if para_num == 1 && in_t[0] == "HI" { call_params = match asm_fmts[1].as_str() { "si10" => { - format!("static_assert_simm_bits!(IMM_S10, 10);\n __{current_name}(IMM_S10)") + format!( + "static_assert_simm_bits!(IMM_S10, 10);\n {unsafe_start}__{current_name}(IMM_S10){unsafe_end}" + ) } "i13" => { - format!("static_assert_simm_bits!(IMM_S13, 13);\n __{current_name}(IMM_S13)") + format!( + "static_assert_simm_bits!(IMM_S13, 13);\n {unsafe_start}__{current_name}(IMM_S13){unsafe_end}" + ) } _ => panic!("unsupported assembly format: {}", asm_fmts[2]), } } else if para_num == 2 && (in_t[1] == "UQI" || in_t[1] == "USI") { call_params = if asm_fmts[2].starts_with("ui") { format!( - "static_assert_uimm_bits!(IMM{0}, {0});\n __{current_name}(a, IMM{0})", + "static_assert_uimm_bits!(IMM{0}, {0});\n {unsafe_start}__{current_name}(a, IMM{0}){unsafe_end}", asm_fmts[2].get(2..).unwrap() ) } else { @@ -489,14 +499,16 @@ fn gen_bind_body( } else if para_num == 2 && in_t[1] == "QI" { call_params = match asm_fmts[2].as_str() { "si5" => { - format!("static_assert_simm_bits!(IMM_S5, 5);\n __{current_name}(a, IMM_S5)") + format!( + "static_assert_simm_bits!(IMM_S5, 5);\n {unsafe_start}__{current_name}(a, IMM_S5){unsafe_end}" + ) } _ => panic!("unsupported assembly format: {}", asm_fmts[2]), }; } else if para_num == 2 && in_t[0] == "CVPOINTER" && in_t[1] == "SI" { call_params = if asm_fmts[2].starts_with("si") { format!( - "static_assert_simm_bits!(IMM_S{0}, {0});\n __{current_name}(mem_addr, IMM_S{0})", + "static_assert_simm_bits!(IMM_S{0}, {0});\n {unsafe_start}__{current_name}(mem_addr, IMM_S{0}){unsafe_end}", asm_fmts[2].get(2..).unwrap() ) } else { @@ -504,13 +516,13 @@ fn gen_bind_body( } } else if para_num == 2 && in_t[0] == "CVPOINTER" && in_t[1] == "DI" { call_params = match asm_fmts[2].as_str() { - "rk" => format!("__{current_name}(mem_addr, b)"), + "rk" => format!("{unsafe_start}__{current_name}(mem_addr, b){unsafe_end}"), _ => panic!("unsupported assembly format: {}", asm_fmts[2]), }; } else if para_num == 3 && (in_t[2] == "USI" || in_t[2] == "UQI") { call_params = if asm_fmts[2].starts_with("ui") { format!( - "static_assert_uimm_bits!(IMM{0}, {0});\n __{current_name}(a, b, IMM{0})", + "static_assert_uimm_bits!(IMM{0}, {0});\n {unsafe_start}__{current_name}(a, b, IMM{0}){unsafe_end}", asm_fmts[2].get(2..).unwrap() ) } else { @@ -519,19 +531,19 @@ fn gen_bind_body( } else if para_num == 3 && in_t[1] == "CVPOINTER" && in_t[2] == "SI" { call_params = match asm_fmts[2].as_str() { "si12" => format!( - "static_assert_simm_bits!(IMM_S12, 12);\n __{current_name}(a, mem_addr, IMM_S12)" + "static_assert_simm_bits!(IMM_S12, 12);\n 
{unsafe_start}__{current_name}(a, mem_addr, IMM_S12){unsafe_end}" ), _ => panic!("unsupported assembly format: {}", asm_fmts[2]), }; } else if para_num == 3 && in_t[1] == "CVPOINTER" && in_t[2] == "DI" { call_params = match asm_fmts[2].as_str() { - "rk" => format!("__{current_name}(a, mem_addr, b)"), + "rk" => format!("{unsafe_start}__{current_name}(a, mem_addr, b){unsafe_end}"), _ => panic!("unsupported assembly format: {}", asm_fmts[2]), }; } else if para_num == 4 { call_params = match (asm_fmts[2].as_str(), current_name.chars().last().unwrap()) { ("si8", t) => format!( - "static_assert_simm_bits!(IMM_S8, 8);\n static_assert_uimm_bits!(IMM{0}, {0});\n __{current_name}(a, mem_addr, IMM_S8, IMM{0})", + "static_assert_simm_bits!(IMM_S8, 8);\n static_assert_uimm_bits!(IMM{0}, {0});\n {unsafe_start}__{current_name}(a, mem_addr, IMM_S8, IMM{0}){unsafe_end}", type_to_imm(t) ), (_, _) => panic!( diff --git a/library/stdarch/crates/stdarch-test/Cargo.toml b/library/stdarch/crates/stdarch-test/Cargo.toml index e4791e4ec5251..e88258bfd30d0 100644 --- a/library/stdarch/crates/stdarch-test/Cargo.toml +++ b/library/stdarch/crates/stdarch-test/Cargo.toml @@ -7,7 +7,6 @@ edition = "2024" [dependencies] assert-instr-macro = { path = "../assert-instr-macro" } simd-test-macro = { path = "../simd-test-macro" } -lazy_static = "1.0" rustc-demangle = "0.1.8" cfg-if = "1.0" @@ -20,7 +19,7 @@ cc = "1.0" # time, and we want to make updates to this explicit rather than automatically # picking up updates which might break CI with new instruction names. [target.'cfg(target_arch = "wasm32")'.dependencies] -wasmprinter = "=0.2.67" +wasmprinter = "=0.235" [features] default = [] diff --git a/library/stdarch/crates/stdarch-test/src/lib.rs b/library/stdarch/crates/stdarch-test/src/lib.rs index f6614f6d51c90..ecaf95f61766a 100644 --- a/library/stdarch/crates/stdarch-test/src/lib.rs +++ b/library/stdarch/crates/stdarch-test/src/lib.rs @@ -6,14 +6,12 @@ #![deny(rust_2018_idioms)] #![allow(clippy::missing_docs_in_private_items, clippy::print_stdout)] -#[macro_use] -extern crate lazy_static; #[macro_use] extern crate cfg_if; pub use assert_instr_macro::*; pub use simd_test_macro::*; -use std::{cmp, collections::HashSet, env, hash, hint::black_box, str}; +use std::{cmp, collections::HashSet, env, hash, hint::black_box, str, sync::LazyLock}; cfg_if! { if #[cfg(target_arch = "wasm32")] { @@ -25,9 +23,7 @@ cfg_if! { } } -lazy_static! { - static ref DISASSEMBLY: HashSet<Function> = disassemble_myself(); -} +static DISASSEMBLY: LazyLock<HashSet<Function>> = LazyLock::new(disassemble_myself); #[derive(Debug)] struct Function { @@ -65,11 +61,12 @@ pub fn assert(shim_addr: usize, fnname: &str, expected: &str) { black_box(shim_addr); //eprintln!("shim name: {fnname}"); - let function = &DISASSEMBLY - .get(&Function::new(fnname)) - .unwrap_or_else(|| panic!("function \"{fnname}\" not found in the disassembly")); + let Some(function) = &DISASSEMBLY.get(&Function::new(fnname)) else { + panic!("function `{fnname}` not found in the disassembly") + }; //eprintln!(" function: {:?}", function); + // Trim any filler instructions. let mut instrs = &function.instrs[..]; while instrs.last().is_some_and(|s| s == "nop" || s == "int3") { instrs = &instrs[..instrs.len() - 1]; } @@ -84,12 +81,26 @@ pub fn assert(shim_addr: usize, fnname: &str, expected: &str) { // 2. It is a mark, indicating that the instruction will be // compiled into other instructions - mainly because of llvm // optimization. 
- let expected = if expected == "unknown" { - "<unknown>" // Workaround for rust-lang/stdarch#1674, todo: remove when the issue is fixed - } else { - expected + let expected = match expected { + // `<unknown>` is what LLVM will generate for unknown instructions. We use this to fail + // loudly when LLVM does start supporting these instructions. + // + // This was introduced in https://github.com/rust-lang/stdarch/pull/1674 to work around the + // RISC-V P extension not yet being supported. + "unknown" => "<unknown>", + _ => expected, }; - let found = expected == "nop" || instrs.iter().any(|s| s.starts_with(expected)); + + // Check whether the given instruction is part of the disassembled body. + let found = expected == "nop" + || instrs.iter().any(|instruction| { + instruction.starts_with(expected) + // Check that the next character is non-alphanumeric. This prevents false negatives + // when e.g. `fminnm` was used but `fmin` was expected. + // + // TODO: resolve the conflicts (x86_64 and aarch64 have a bunch, probably others) + // && !instruction[expected.len()..].starts_with(|c: char| c.is_ascii_alphanumeric()) + }); // Look for subroutine call instructions in the disassembly to detect whether // inlining failed: all intrinsics are `#[inline(always)]`, so calling one diff --git a/library/stdarch/examples/Cargo.toml b/library/stdarch/examples/Cargo.toml index 61184494e1573..61451edee841c 100644 --- a/library/stdarch/examples/Cargo.toml +++ b/library/stdarch/examples/Cargo.toml @@ -12,7 +12,6 @@ default-run = "hex" [dependencies] core_arch = { path = "../crates/core_arch" } -std_detect = { path = "../crates/std_detect" } quickcheck = "1.0" rand = "0.8" diff --git a/library/stdarch/examples/connect5.rs b/library/stdarch/examples/connect5.rs index 2b451f45d71c0..371b28552b32d 100644 --- a/library/stdarch/examples/connect5.rs +++ b/library/stdarch/examples/connect5.rs @@ -40,9 +40,13 @@ use std::cmp; use std::time::Instant; #[cfg(target_arch = "x86")] -use {core_arch::arch::x86::*, std_detect::is_x86_feature_detected}; +use core_arch::arch::x86::*; #[cfg(target_arch = "x86_64")] -use {core_arch::arch::x86_64::*, std_detect::is_x86_feature_detected}; +use core_arch::arch::x86_64::*; +#[cfg(target_arch = "x86")] +use std::is_x86_feature_detected; +#[cfg(target_arch = "x86_64")] +use std::is_x86_feature_detected; // types @@ -558,7 +562,12 @@ fn search(pos: &Pos, alpha: i32, beta: i32, depth: i32, _ply: i32) -> i32 { assert_ne!(bm, MOVE_NONE); assert!(bs >= -EVAL_INF && bs <= EVAL_INF); - if _ply == 0 { bm } else { bs } //best move at the root node, best score elsewhere + //best move at the root node, best score elsewhere + if _ply == 0 { + bm + } else { + bs + } } /// Evaluation function: give different scores to different patterns after a fixed depth. 
@@ -570,15 +579,11 @@ fn eval(pos: &Pos, _ply: i32) -> i32 { #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] { if check_x86_avx512_features() { - unsafe { - if check_patternlive4_avx512(pos, def) { - return -4096; - } - } - } else { - if check_patternlive4(pos, def) { + if unsafe { check_patternlive4_avx512(pos, def) } { return -4096; } + } else if check_patternlive4(pos, def) { + return -4096; } } @@ -593,15 +598,11 @@ fn eval(pos: &Pos, _ply: i32) -> i32 { #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] { if check_x86_avx512_features() { - unsafe { - if check_patternlive4_avx512(pos, atk) { - return 2560; - } - } - } else { - if check_patternlive4(pos, atk) { + if unsafe { check_patternlive4_avx512(pos, atk) } { return 2560; } + } else if check_patternlive4(pos, atk) { + return 2560; } } @@ -616,15 +617,11 @@ fn eval(pos: &Pos, _ply: i32) -> i32 { #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] { if check_x86_avx512_features() { - unsafe { - if check_patterndead4_avx512(pos, atk) > 0 { - return 2560; - } - } - } else { - if check_patterndead4(pos, atk) > 0 { + if unsafe { check_patterndead4_avx512(pos, atk) > 0 } { return 2560; } + } else if check_patterndead4(pos, atk) > 0 { + return 2560; } } @@ -909,9 +906,7 @@ fn pos_is_winner_avx512(pos: &Pos) -> bool { 0b00_10_10_10_10_11_10_10_10_10_11_11_11_11_11_10]; let mut count_match: i32 = 0; - for dir in 0..2 { - // direction 0 and 1 - let mut board0 = board0org[dir]; + for mut board0 in board0org { let boardf = _mm512_and_si512(answer, board0); let temp_mask = _mm512_mask_cmpeq_epi16_mask(answer_mask[0], answer, boardf); count_match += _popcnt32(temp_mask as i32); diff --git a/library/stdarch/examples/hex.rs b/library/stdarch/examples/hex.rs index e393ad7271689..95ffbaf666cea 100644 --- a/library/stdarch/examples/hex.rs +++ b/library/stdarch/examples/hex.rs @@ -36,9 +36,13 @@ use std::{ }; #[cfg(target_arch = "x86")] -use {core_arch::arch::x86::*, std_detect::is_x86_feature_detected}; +use core_arch::arch::x86::*; #[cfg(target_arch = "x86_64")] -use {core_arch::arch::x86_64::*, std_detect::is_x86_feature_detected}; +use core_arch::arch::x86_64::*; +#[cfg(target_arch = "x86")] +use std::is_x86_feature_detected; +#[cfg(target_arch = "x86_64")] +use std::is_x86_feature_detected; fn main() { let mut input = Vec::new(); diff --git a/library/stdarch/triagebot.toml b/library/stdarch/triagebot.toml index 75eb642e99631..2c281c8f7d429 100644 --- a/library/stdarch/triagebot.toml +++ b/library/stdarch/triagebot.toml @@ -1,7 +1,7 @@ [assign] [assign.owners] -"*" = ["@Amanieu"] +"*" = ["@Amanieu", "@folkertdev", "@sayantn"] [ping.windows] message = """\ diff --git a/library/sysroot/Cargo.toml b/library/sysroot/Cargo.toml index 032f5272a9cd4..7b4aeed94e980 100644 --- a/library/sysroot/Cargo.toml +++ b/library/sysroot/Cargo.toml @@ -6,6 +6,8 @@ version = "0.0.0" edition = "2024" [lib] +test = false +bench = false # make sure this crate isn't included in public standard library docs doc = false @@ -23,9 +25,7 @@ backtrace = ["std/backtrace"] backtrace-trace-only = ["std/backtrace-trace-only"] compiler-builtins-c = ["std/compiler-builtins-c"] compiler-builtins-mem = ["std/compiler-builtins-mem"] -compiler-builtins-no-asm = ["std/compiler-builtins-no-asm"] compiler-builtins-no-f16-f128 = ["std/compiler-builtins-no-f16-f128"] -compiler-builtins-mangled-names = ["std/compiler-builtins-mangled-names"] debug_refcell = ["std/debug_refcell"] llvm-libunwind = ["std/llvm-libunwind"] system-llvm-libunwind = ["std/system-llvm-libunwind"] 
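Back in connect5.rs, the `pos_is_winner_avx512` hunk swaps index-based iteration (`for dir in 0..2 { let mut board0 = board0org[dir]; ... }`) for iterating over the array by value, dropping the unused `dir` binding. A tiny stand-alone sketch of the same pattern; the bit patterns are arbitrary placeholders, not real bitboards:

```
fn main() {
    let board0org: [u64; 2] = [0b1010, 0b0111];

    // Arrays implement IntoIterator by value (since Rust 1.53), so the index and
    // the explicit copy out of `board0org[dir]` are unnecessary.
    let mut count = 0;
    for mut board0 in board0org {
        board0 >>= 1; // `mut` still lets the body shift its local copy
        count += board0.count_ones();
    }
    assert_eq!(count, 4);
}
```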
diff --git a/library/windows_targets/Cargo.toml b/library/windows_targets/Cargo.toml index 705c9e0438119..1c804a0ab391f 100644 --- a/library/windows_targets/Cargo.toml +++ b/library/windows_targets/Cargo.toml @@ -4,6 +4,11 @@ description = "A drop-in replacement for the real windows-targets crate for use version = "0.0.0" edition = "2024" +[lib] +test = false +bench = false +doc = false + [features] # Enable using raw-dylib for Windows imports. # This will eventually be the default. diff --git a/library/windows_targets/src/lib.rs b/library/windows_targets/src/lib.rs index 9e82e6a720006..3446e2113dda9 100644 --- a/library/windows_targets/src/lib.rs +++ b/library/windows_targets/src/lib.rs @@ -34,22 +34,12 @@ pub macro link_dylib { #[cfg(feature = "windows_raw_dylib")] pub macro link($($tt:tt)*) { - $crate::link_raw_dylib!($($tt)*) + $crate::link_raw_dylib!($($tt)*); } #[cfg(not(feature = "windows_raw_dylib"))] -pub macro link { - ($library:literal $abi:literal $($link_name:literal)? $(#[$doc:meta])? fn $($function:tt)*) => ( - // Note: the windows-targets crate uses a pre-built Windows.lib import library which we don't - // have in this repo. So instead we always link kernel32.lib and add the rest of the import - // libraries below by using an empty extern block. This works because extern blocks are not - // connected to the library given in the #[link] attribute. - #[link(name = "kernel32")] - unsafe extern $abi { - $(#[link_name=$link_name])? - pub fn $($function)*; - } - ) +pub macro link($($tt:tt)*) { + $crate::link_dylib!($($tt)*); } #[cfg(not(feature = "windows_raw_dylib"))] diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 82b33738ad241..69b0a387eb9a3 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -2,5 +2,5 @@ # standard library we currently track. [toolchain] -channel = "nightly-2025-07-21" +channel = "nightly-2025-07-30" components = ["llvm-tools-preview", "rustc-dev", "rust-src", "rustfmt"] diff --git a/tool_config/kani-version.toml b/tool_config/kani-version.toml index 8ad1d860155b6..fd33055b77a92 100644 --- a/tool_config/kani-version.toml +++ b/tool_config/kani-version.toml @@ -2,4 +2,4 @@ # incompatible with the verify-std repo. [kani] -commit = "c1648416995a15b6ab33e44829e96c9a65541927" +commit = "1954c10d033221e8085d597464bc413865152e27" From cca65d34563e63d0fd1bd9f02f9e4c31f5ce65fb Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:09:59 +0000 Subject: [PATCH 11/20] Merge subtree update for toolchain nightly-2025-07-31 (#437) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is an automated PR to merge library subtree updates from 2025-07-30 (rust-lang/rust@ba7e63b63871a429533c189adbfb1d9a6337e000) to 2025-07-31 (rust-lang/rust@3048886e59c94470e726ecaaf2add7242510ac11), inclusive. This is a clean merge, no conflicts were detected. 
**Do not remove or edit the following annotations:** git-subtree-dir: library git-subtree-split: af745a4faf63d52814a5f8df25611b24454a76c8 --------- Signed-off-by: xizheyin Signed-off-by: Ayush Singh Co-authored-by: Roger Curley Co-authored-by: Ralf Jung Co-authored-by: Marijn Schouten Co-authored-by: Josh Triplett Co-authored-by: Matthias Krüger <476013+matthiaskrgr@users.noreply.github.com> Co-authored-by: Folkert de Vries Co-authored-by: okaneco <47607823+okaneco@users.noreply.github.com> Co-authored-by: Folkert de Vries Co-authored-by: Sayantan Chakraborty <142906350+sayantn@users.noreply.github.com> Co-authored-by: nazo6 Co-authored-by: bors Co-authored-by: Jakub Beránek Co-authored-by: Orson Peters Co-authored-by: León Orell Valerian Liehr Co-authored-by: xizheyin Co-authored-by: Deadbeef Co-authored-by: René Kijewski Co-authored-by: Nik Revenco Co-authored-by: The Miri Cronjob Bot Co-authored-by: Tim (Theemathas) Chirananthavat Co-authored-by: Chris Denton Co-authored-by: Samuel Tardieu Co-authored-by: Travis Cross Co-authored-by: Amanieu d'Antras Co-authored-by: Oli Scherer Co-authored-by: David Mládek Co-authored-by: SunkenPotato <3.14radius02@gmail.com> Co-authored-by: Cameron Steffen Co-authored-by: Martin Ombura Jr <8682597+martinomburajr@users.noreply.github.com> Co-authored-by: Madhav Madhusoodanan Co-authored-by: sayantn Co-authored-by: Trevor Gross Co-authored-by: WANG Rui Co-authored-by: Luigi Sartor Piucco Co-authored-by: Julien THILLARD <54775010+supersurviveur@users.noreply.github.com> Co-authored-by: The rustc-josh-sync Cronjob Bot Co-authored-by: usamoi Co-authored-by: Rémy Rakic Co-authored-by: Nurzhan Sakén Co-authored-by: ltdk Co-authored-by: bjorn3 <17426603+bjorn3@users.noreply.github.com> Co-authored-by: Guillaume Gomez Co-authored-by: Evgenii Zheltonozhskii Co-authored-by: roblabla Co-authored-by: 许杰友 Jieyou Xu (Joe) <39484203+jieyouxu@users.noreply.github.com> Co-authored-by: Ayush Singh Co-authored-by: Kornel Co-authored-by: Jonas Platte Co-authored-by: Alex Crichton Co-authored-by: Zachary S Co-authored-by: Jeremy Smart Co-authored-by: Ivan Tadeu Ferreira Antunes Filho Co-authored-by: Alisa Sireneva Co-authored-by: Trevor Gross Co-authored-by: Jacob Pratt Co-authored-by: Scott McMurray Co-authored-by: xonx <119700621+xonx4l@users.noreply.github.com> Co-authored-by: Yosh Co-authored-by: joboet Co-authored-by: Stuart Cook Co-authored-by: Connor Tsui Co-authored-by: Aandreba Co-authored-by: Lucas Werkmeister Co-authored-by: gitbot --- library/core/src/panic/location.rs | 42 +++++++++++++++++- library/coretests/tests/panic/location.rs | 43 ++++++++++++++++++- .../coretests/tests/panic/location/file_a.rs | 15 +++++++ .../coretests/tests/panic/location/file_b.rs | 15 +++++++ .../coretests/tests/panic/location/file_c.rs | 11 +++++ rust-toolchain.toml | 2 +- tool_config/kani-version.toml | 2 +- 7 files changed, 125 insertions(+), 5 deletions(-) create mode 100644 library/coretests/tests/panic/location/file_a.rs create mode 100644 library/coretests/tests/panic/location/file_b.rs create mode 100644 library/coretests/tests/panic/location/file_c.rs diff --git a/library/core/src/panic/location.rs b/library/core/src/panic/location.rs index 972270208852a..6ef7d5a22a30f 100644 --- a/library/core/src/panic/location.rs +++ b/library/core/src/panic/location.rs @@ -1,5 +1,7 @@ +use crate::cmp::Ordering; use crate::ffi::CStr; use crate::fmt; +use crate::hash::{Hash, Hasher}; use crate::marker::PhantomData; use crate::ptr::NonNull; @@ -32,7 +34,7 @@ use crate::ptr::NonNull; /// 
Files are compared as strings, not `Path`, which could be unexpected. /// See [`Location::file`]'s documentation for more discussion. #[lang = "panic_location"] -#[derive(Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[derive(Copy, Clone)] #[stable(feature = "panic_hooks", since = "1.10.0")] pub struct Location<'a> { // A raw pointer is used rather than a reference because the pointer is valid for one more byte @@ -44,6 +46,44 @@ pub struct Location<'a> { _filename: PhantomData<&'a str>, } +#[stable(feature = "panic_hooks", since = "1.10.0")] +impl PartialEq for Location<'_> { + fn eq(&self, other: &Self) -> bool { + // Compare col / line first as they're cheaper to compare and more likely to differ, + // while not impacting the result. + self.col == other.col && self.line == other.line && self.file() == other.file() + } +} + +#[stable(feature = "panic_hooks", since = "1.10.0")] +impl Eq for Location<'_> {} + +#[stable(feature = "panic_hooks", since = "1.10.0")] +impl Ord for Location<'_> { + fn cmp(&self, other: &Self) -> Ordering { + self.file() + .cmp(other.file()) + .then_with(|| self.line.cmp(&other.line)) + .then_with(|| self.col.cmp(&other.col)) + } +} + +#[stable(feature = "panic_hooks", since = "1.10.0")] +impl PartialOrd for Location<'_> { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +#[stable(feature = "panic_hooks", since = "1.10.0")] +impl Hash for Location<'_> { + fn hash(&self, state: &mut H) { + self.file().hash(state); + self.line.hash(state); + self.col.hash(state); + } +} + #[stable(feature = "panic_hooks", since = "1.10.0")] impl fmt::Debug for Location<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { diff --git a/library/coretests/tests/panic/location.rs b/library/coretests/tests/panic/location.rs index 5ce0b06e90e1a..910001bcc1c58 100644 --- a/library/coretests/tests/panic/location.rs +++ b/library/coretests/tests/panic/location.rs @@ -3,6 +3,23 @@ use core::panic::Location; // Note: Some of the following tests depend on the source location, // so please be careful when editing this file. +mod file_a; +mod file_b; +mod file_c; + +// A small shuffled set of locations for testing, along with their true order. 
+const LOCATIONS: [(usize, &'static Location<'_>); 9] = [ + (7, file_c::two()), + (0, file_a::one()), + (3, file_b::one()), + (5, file_b::three()), + (8, file_c::three()), + (6, file_c::one()), + (2, file_a::three()), + (4, file_b::two()), + (1, file_a::two()), +]; + #[test] fn location_const_caller() { const _CALLER_REFERENCE: &Location<'static> = Location::caller(); @@ -20,7 +37,7 @@ fn location_const_file() { fn location_const_line() { const CALLER: &Location<'static> = Location::caller(); const LINE: u32 = CALLER.line(); - assert_eq!(LINE, 21); + assert_eq!(LINE, 38); } #[test] @@ -34,6 +51,28 @@ fn location_const_column() { fn location_debug() { let f = format!("{:?}", Location::caller()); assert!(f.contains(&format!("{:?}", file!()))); - assert!(f.contains("35")); + assert!(f.contains("52")); assert!(f.contains("29")); } + +#[test] +fn location_eq() { + for (i, a) in LOCATIONS { + for (j, b) in LOCATIONS { + if i == j { + assert_eq!(a, b); + } else { + assert_ne!(a, b); + } + } + } +} + +#[test] +fn location_ord() { + let mut locations = LOCATIONS.clone(); + locations.sort_by_key(|(_o, l)| **l); + for (correct, (order, _l)) in locations.iter().enumerate() { + assert_eq!(correct, *order); + } +} diff --git a/library/coretests/tests/panic/location/file_a.rs b/library/coretests/tests/panic/location/file_a.rs new file mode 100644 index 0000000000000..1ceb225ee9c39 --- /dev/null +++ b/library/coretests/tests/panic/location/file_a.rs @@ -0,0 +1,15 @@ +use core::panic::Location; + +// Used for test super::location_{ord, eq}. Must be in a dedicated file. + +pub const fn one() -> &'static Location<'static> { + Location::caller() +} + +pub const fn two() -> &'static Location<'static> { + Location::caller() +} + +pub const fn three() -> &'static Location<'static> { + Location::caller() +} diff --git a/library/coretests/tests/panic/location/file_b.rs b/library/coretests/tests/panic/location/file_b.rs new file mode 100644 index 0000000000000..1ceb225ee9c39 --- /dev/null +++ b/library/coretests/tests/panic/location/file_b.rs @@ -0,0 +1,15 @@ +use core::panic::Location; + +// Used for test super::location_{ord, eq}. Must be in a dedicated file. + +pub const fn one() -> &'static Location<'static> { + Location::caller() +} + +pub const fn two() -> &'static Location<'static> { + Location::caller() +} + +pub const fn three() -> &'static Location<'static> { + Location::caller() +} diff --git a/library/coretests/tests/panic/location/file_c.rs b/library/coretests/tests/panic/location/file_c.rs new file mode 100644 index 0000000000000..2ac416c19cf48 --- /dev/null +++ b/library/coretests/tests/panic/location/file_c.rs @@ -0,0 +1,11 @@ +// Used for test super::location_{ord, eq}. Must be in a dedicated file. + +// This is used for testing column ordering of Location, hence this ugly one-liner. +// We must fmt skip the entire containing module or else tidy will still complain. +#[rustfmt::skip] +mod no_fmt { + use core::panic::Location; + pub const fn one() -> &'static Location<'static> { Location::caller() } pub const fn two() -> &'static Location<'static> { Location::caller() } pub const fn three() -> &'static Location<'static> { Location::caller() } +} + +pub use no_fmt::*; diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 69b0a387eb9a3..23c9fac486534 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -2,5 +2,5 @@ # standard library we currently track. 
[toolchain] -channel = "nightly-2025-07-30" +channel = "nightly-2025-07-31" components = ["llvm-tools-preview", "rustc-dev", "rust-src", "rustfmt"] diff --git a/tool_config/kani-version.toml b/tool_config/kani-version.toml index fd33055b77a92..a50a4f9395213 100644 --- a/tool_config/kani-version.toml +++ b/tool_config/kani-version.toml @@ -2,4 +2,4 @@ # incompatible with the verify-std repo. [kani] -commit = "1954c10d033221e8085d597464bc413865152e27" +commit = "f76e668b5f86daad11e51d78ab10998426da943b" From da0761a9645f2db3cb696a7d7e3d1f9eccb77344 Mon Sep 17 00:00:00 2001 From: thanhnguyen-aws Date: Fri, 1 Aug 2025 15:15:03 -0700 Subject: [PATCH 12/20] add quantifiers to loop invariant --- library/core/src/slice/memchr.rs | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/library/core/src/slice/memchr.rs b/library/core/src/slice/memchr.rs index f181d2b314f59..32370ac8842fe 100644 --- a/library/core/src/slice/memchr.rs +++ b/library/core/src/slice/memchr.rs @@ -36,7 +36,7 @@ const fn memchr_naive(x: u8, text: &[u8]) -> Option { let mut i = 0; // FIXME(const-hack): Replace with `text.iter().pos(|c| *c == x)`. - #[safety::loop_invariant(true)] + #[safety::loop_invariant(i <= text.len() && kani::forall!(|j in (0,i)| unsafe {*text.as_ptr().wrapping_add(j)} != x))] while i < text.len() { if text[i] == x { return Some(i); @@ -79,7 +79,8 @@ const fn memchr_aligned(x: u8, text: &[u8]) -> Option { // search the body of the text let repeated_x = usize::repeat_u8(x); - #[safety::loop_invariant(len >= 2 * USIZE_BYTES && offset <= len)] + #[safety::loop_invariant(len >= 2 * USIZE_BYTES && offset <= len && + kani::forall!(|j in (0,offset)| unsafe {*text.as_ptr().wrapping_add(j)} != x))] while offset <= len - 2 * USIZE_BYTES { // SAFETY: the while's predicate guarantees a distance of at least 2 * usize_bytes // between the offset and the end of the slice. @@ -170,22 +171,21 @@ pub mod verify { use crate::kani; #[kani::proof] + #[kani::solver(cvc5)] #[cfg(not(all(target_arch = "x86_64", target_feature = "sse2")))] pub fn check_memchr_naive() { - const ARR_SIZE: usize = 1000; + const ARR_SIZE: usize = 64; let x: u8 = kani::any(); - let a: [u8; ARR_SIZE] = kani::any(); - let text = kani::slice::any_slice_of_array(&a); - let _result = memchr_naive(x, text); + let text: [u8; ARR_SIZE] = kani::any(); + let _result = memchr_naive(x, &text); } #[kani::proof] #[cfg(not(all(target_arch = "x86_64", target_feature = "sse2")))] pub fn check_memchr() { - const ARR_SIZE: usize = 1000; + const ARR_SIZE: usize = 64; let x: u8 = kani::any(); - let a: [u8; ARR_SIZE] = kani::any(); - let text = kani::slice::any_slice_of_array(&a); - let _result = memrchr(x, text); + let text: [u8; ARR_SIZE] = kani::any(); + let _result = memrchr(x, &text); } } From e4dcb1994ec94477cd5ef9b5c133acd4136584a0 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 4 Aug 2025 08:17:42 +0000 Subject: [PATCH 13/20] Update Kani Metrics (#442) This is an automated PR to update Kani metrics. The metrics have been updated by running `./scripts/run-kani.sh --run metrics`. 
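Patch 12 above is the substantive change in this stretch: the memchr loop invariants gain `kani::forall` quantifiers so the prover can carry "no byte before the current index matched" across iterations, and the harnesses shrink to fixed-size arrays with the cvc5 solver. A hedged, self-contained sketch of the same idiom on a plain linear scan; the harness name and array size are illustrative, and running it assumes a Kani setup with loop contracts (`-Z loop-contracts`) and quantifier support enabled:

```
#[cfg(kani)]
#[kani::proof]
#[kani::solver(cvc5)]
fn check_linear_scan_invariant() {
    let haystack: [u8; 16] = kani::any();
    let needle: u8 = kani::any();

    let mut i = 0;
    // Invariant: `i` stays in bounds and none of the bytes already scanned equal `needle`.
    // The quantified read mirrors the patch: a raw-pointer read over the prefix 0..i.
    #[kani::loop_invariant(i <= haystack.len()
        && kani::forall!(|j in (0, i)| unsafe { *haystack.as_ptr().wrapping_add(j) } != needle))]
    while i < haystack.len() && haystack[i] != needle {
        i += 1;
    }
    // The invariant plus the negated guard is all Kani knows after the loop.
    assert!(i <= haystack.len());
}
```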
Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> --- .../kani-std-analysis/metrics-data-core.json | 22 +++++++++++++++++++ .../kani-std-analysis/metrics-data-std.json | 22 +++++++++++++++++++ 2 files changed, 44 insertions(+) diff --git a/scripts/kani-std-analysis/metrics-data-core.json b/scripts/kani-std-analysis/metrics-data-core.json index b8dd717bea50b..07873816a5e41 100644 --- a/scripts/kani-std-analysis/metrics-data-core.json +++ b/scripts/kani-std-analysis/metrics-data-core.json @@ -534,6 +534,28 @@ "verified_safe_fns_under_contract": 111, "verified_safe_fns_with_loop_under_contract": 0, "total_functions_under_contract_all_crates": 381 + }, + { + "date": "2025-08-03", + "total_unsafe_fns": 7194, + "total_unsafe_fns_with_loop": 22, + "total_safe_abstractions": 1861, + "total_safe_abstractions_with_loop": 65, + "total_safe_fns": 15898, + "total_safe_fns_with_loop": 727, + "unsafe_fns_under_contract": 253, + "unsafe_fns_with_loop_under_contract": 2, + "verified_unsafe_fns_under_contract": 244, + "verified_unsafe_fns_with_loop_under_contract": 1, + "safe_abstractions_under_contract": 77, + "safe_abstractions_with_loop_under_contract": 0, + "verified_safe_abstractions_under_contract": 77, + "verified_safe_abstractions_with_loop_under_contract": 0, + "safe_fns_under_contract": 113, + "safe_fns_with_loop_under_contract": 0, + "verified_safe_fns_under_contract": 111, + "verified_safe_fns_with_loop_under_contract": 0, + "total_functions_under_contract_all_crates": 381 } ] } \ No newline at end of file diff --git a/scripts/kani-std-analysis/metrics-data-std.json b/scripts/kani-std-analysis/metrics-data-std.json index 58c0db69f2ca4..a76c713f1b9ad 100644 --- a/scripts/kani-std-analysis/metrics-data-std.json +++ b/scripts/kani-std-analysis/metrics-data-std.json @@ -417,6 +417,28 @@ "verified_safe_fns_under_contract": 0, "verified_safe_fns_with_loop_under_contract": 0, "total_functions_under_contract_all_crates": 381 + }, + { + "date": "2025-08-03", + "total_unsafe_fns": 183, + "total_unsafe_fns_with_loop": 12, + "total_safe_abstractions": 504, + "total_safe_abstractions_with_loop": 47, + "total_safe_fns": 4169, + "total_safe_fns_with_loop": 190, + "unsafe_fns_under_contract": 9, + "unsafe_fns_with_loop_under_contract": 0, + "verified_unsafe_fns_under_contract": 2, + "verified_unsafe_fns_with_loop_under_contract": 0, + "safe_abstractions_under_contract": 0, + "safe_abstractions_with_loop_under_contract": 0, + "verified_safe_abstractions_under_contract": 0, + "verified_safe_abstractions_with_loop_under_contract": 0, + "safe_fns_under_contract": 0, + "safe_fns_with_loop_under_contract": 0, + "verified_safe_fns_under_contract": 0, + "verified_safe_fns_with_loop_under_contract": 0, + "total_functions_under_contract_all_crates": 381 } ] } \ No newline at end of file From 67098c5a562e46cd63673850e59ba08ceaff4627 Mon Sep 17 00:00:00 2001 From: thanhnguyen-aws Date: Mon, 4 Aug 2025 07:37:31 -0700 Subject: [PATCH 14/20] add im port kani --- library/core/src/slice/memchr.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/library/core/src/slice/memchr.rs b/library/core/src/slice/memchr.rs index 32370ac8842fe..d42ce66957a02 100644 --- a/library/core/src/slice/memchr.rs +++ b/library/core/src/slice/memchr.rs @@ -2,6 +2,7 @@ // Copyright 2015 Andrew Gallant, bluss and Nicolas Koch use crate::intrinsics::const_eval_select; +use crate::kani; const LO_USIZE: usize = usize::repeat_u8(0x01); const HI_USIZE: usize = usize::repeat_u8(0x80); @@ -36,7 
+37,7 @@ const fn memchr_naive(x: u8, text: &[u8]) -> Option { let mut i = 0; // FIXME(const-hack): Replace with `text.iter().pos(|c| *c == x)`. - #[safety::loop_invariant(i <= text.len() && kani::forall!(|j in (0,i)| unsafe {*text.as_ptr().wrapping_add(j)} != x))] + #[kani::loop_invariant(i <= text.len() && kani::forall!(|j in (0,i)| unsafe {*text.as_ptr().wrapping_add(j)} != x))] while i < text.len() { if text[i] == x { return Some(i); @@ -79,7 +80,7 @@ const fn memchr_aligned(x: u8, text: &[u8]) -> Option { // search the body of the text let repeated_x = usize::repeat_u8(x); - #[safety::loop_invariant(len >= 2 * USIZE_BYTES && offset <= len && + #[kani::loop_invariant(len >= 2 * USIZE_BYTES && offset <= len && kani::forall!(|j in (0,offset)| unsafe {*text.as_ptr().wrapping_add(j)} != x))] while offset <= len - 2 * USIZE_BYTES { // SAFETY: the while's predicate guarantees a distance of at least 2 * usize_bytes @@ -142,7 +143,7 @@ pub fn memrchr(x: u8, text: &[u8]) -> Option { let repeated_x = usize::repeat_u8(x); let chunk_bytes = size_of::(); - #[safety::loop_invariant(true)] + #[kani::loop_invariant(true)] while offset > min_aligned_offset { // SAFETY: offset starts at len - suffix.len(), as long as it is greater than // min_aligned_offset (prefix.len()) the remaining distance is at least 2 * chunk_bytes. @@ -168,7 +169,6 @@ pub fn memrchr(x: u8, text: &[u8]) -> Option { #[unstable(feature = "kani", issue = "none")] pub mod verify { use super::*; - use crate::kani; #[kani::proof] #[kani::solver(cvc5)] From abda27abc9e0b06045ec9cc0d0acd80312d04147 Mon Sep 17 00:00:00 2001 From: thanhnguyen-aws Date: Mon, 4 Aug 2025 07:46:37 -0700 Subject: [PATCH 15/20] add cfg kani --- library/core/src/slice/memchr.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/library/core/src/slice/memchr.rs b/library/core/src/slice/memchr.rs index d42ce66957a02..88533f9548b4f 100644 --- a/library/core/src/slice/memchr.rs +++ b/library/core/src/slice/memchr.rs @@ -2,6 +2,7 @@ // Copyright 2015 Andrew Gallant, bluss and Nicolas Koch use crate::intrinsics::const_eval_select; +#[cfg(kani)] use crate::kani; const LO_USIZE: usize = usize::repeat_u8(0x01); From bba122439f4a07c514f77a148d60f0838a47e320 Mon Sep 17 00:00:00 2001 From: vonaka Date: Tue, 5 Aug 2025 19:40:01 -0700 Subject: [PATCH 16/20] LLM-generated contracts for `__iterator_get_unchecked` (#435) Preconditions generated by Claude for files containing `__iterator_get_unchecked`. By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 and MIT licenses. --------- Co-authored-by: Fedor Ryabinin Co-authored-by: Michael Tautschnig Co-authored-by: Felipe R. 
Monteiro --- library/alloc/src/collections/vec_deque/iter.rs | 6 ++++++ library/alloc/src/collections/vec_deque/iter_mut.rs | 6 ++++++ library/alloc/src/vec/into_iter.rs | 6 ++++++ library/core/src/array/iter.rs | 8 ++++++++ library/core/src/char/mod.rs | 8 ++++++++ library/core/src/iter/adapters/cloned.rs | 5 +++++ library/core/src/iter/adapters/copied.rs | 5 +++++ library/core/src/iter/adapters/enumerate.rs | 6 ++++++ library/core/src/iter/adapters/fuse.rs | 6 ++++++ library/core/src/iter/adapters/map.rs | 5 +++++ library/core/src/iter/adapters/skip.rs | 6 ++++++ library/core/src/iter/adapters/zip.rs | 7 +++++++ library/core/src/iter/range.rs | 1 + library/core/src/iter/traits/iterator.rs | 5 +++++ library/core/src/range/iter.rs | 6 ++++++ library/core/src/slice/iter.rs | 11 +++++++++++ library/core/src/str/iter.rs | 5 +++++ 17 files changed, 102 insertions(+) diff --git a/library/alloc/src/collections/vec_deque/iter.rs b/library/alloc/src/collections/vec_deque/iter.rs index d3dbd10c863fb..dcf7480f8e931 100644 --- a/library/alloc/src/collections/vec_deque/iter.rs +++ b/library/alloc/src/collections/vec_deque/iter.rs @@ -1,8 +1,12 @@ use core::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce}; +#[cfg(kani)] +use core::kani; use core::num::NonZero; use core::ops::Try; use core::{fmt, mem, slice}; +use safety::requires; + /// An iterator over the elements of a `VecDeque`. /// /// This `struct` is created by the [`iter`] method on [`super::VecDeque`]. See its @@ -144,6 +148,8 @@ impl<'a, T> Iterator for Iter<'a, T> { } #[inline] + #[requires(idx < self.len())] + #[cfg_attr(kani, kani::modifies(self))] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item { // Safety: The TrustedRandomAccess contract requires that callers only pass an index // that is in bounds. diff --git a/library/alloc/src/collections/vec_deque/iter_mut.rs b/library/alloc/src/collections/vec_deque/iter_mut.rs index 0c5f06e752b7b..844c5ce32aab5 100644 --- a/library/alloc/src/collections/vec_deque/iter_mut.rs +++ b/library/alloc/src/collections/vec_deque/iter_mut.rs @@ -1,8 +1,12 @@ use core::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce}; +#[cfg(kani)] +use core::kani; use core::num::NonZero; use core::ops::Try; use core::{fmt, mem, slice}; +use safety::requires; + /// A mutable iterator over the elements of a `VecDeque`. /// /// This `struct` is created by the [`iter_mut`] method on [`super::VecDeque`]. See its @@ -208,6 +212,8 @@ impl<'a, T> Iterator for IterMut<'a, T> { } #[inline] + #[requires(idx < self.len())] + #[cfg_attr(kani, kani::modifies(self))] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item { // Safety: The TrustedRandomAccess contract requires that callers only pass an index // that is in bounds. 
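The two VecDeque iterator files above set the pattern repeated through the rest of this patch: a `#[requires(...)]` precondition restating the documented safety contract, plus `kani::modifies(self)` where the method takes `&mut self`. A stand-alone sketch of that shape on a toy type; the type, method, and harness are made up for illustration, and it uses Kani's own contract attributes rather than the repository's `safety` macro crate:

```
struct Buf {
    data: [u8; 8],
    len: usize,
}

impl Buf {
    fn len(&self) -> usize {
        self.len
    }

    /// # Safety
    /// `idx` must be less than `self.len()`.
    #[cfg_attr(kani, kani::requires(idx < self.len()))]
    #[cfg_attr(kani, kani::modifies(self))]
    unsafe fn get_unchecked(&mut self, idx: usize) -> u8 {
        // SAFETY: the contract above restates the caller's obligation: `idx` is in bounds.
        unsafe { *self.data.get_unchecked(idx) }
    }
}

#[cfg(kani)]
#[kani::proof_for_contract(Buf::get_unchecked)]
fn check_get_unchecked() {
    let mut buf = Buf { data: kani::any(), len: kani::any() };
    kani::assume(buf.len <= buf.data.len());
    let idx: usize = kani::any();
    // `proof_for_contract` assumes the precondition and checks the body for UB.
    let _ = unsafe { buf.get_unchecked(idx) };
}
```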
diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs index 37df928228d9c..35ff96027d804 100644 --- a/library/alloc/src/vec/into_iter.rs +++ b/library/alloc/src/vec/into_iter.rs @@ -2,6 +2,8 @@ use core::iter::{ FusedIterator, InPlaceIterable, SourceIter, TrustedFused, TrustedLen, TrustedRandomAccessNoCoerce, }; +#[cfg(kani)] +use core::kani; use core::marker::PhantomData; use core::mem::{ManuallyDrop, MaybeUninit, SizedTypeProperties}; use core::num::NonZero; @@ -11,6 +13,8 @@ use core::ptr::{self, NonNull}; use core::slice::{self}; use core::{array, fmt}; +use safety::requires; + #[cfg(not(no_global_oom_handling))] use super::AsVecIntoIter; use crate::alloc::{Allocator, Global}; @@ -354,6 +358,8 @@ impl Iterator for IntoIter { R::from_output(accum) } + #[requires(i < self.len())] + #[cfg_attr(kani, kani::modifies(self))] unsafe fn __iterator_get_unchecked(&mut self, i: usize) -> Self::Item where Self: TrustedRandomAccessNoCoerce, diff --git a/library/core/src/array/iter.rs b/library/core/src/array/iter.rs index fdae5c08f1e8e..111a6735f5186 100644 --- a/library/core/src/array/iter.rs +++ b/library/core/src/array/iter.rs @@ -1,7 +1,11 @@ //! Defines the `IntoIter` owned iterator for arrays. +use safety::requires; + use crate::intrinsics::transmute_unchecked; use crate::iter::{FusedIterator, TrustedLen, TrustedRandomAccessNoCoerce}; +#[cfg(kani)] +use crate::kani; use crate::mem::MaybeUninit; use crate::num::NonZero; use crate::ops::{IndexRange, Range, Try}; @@ -138,6 +142,8 @@ impl IntoIter { /// ``` #[unstable(feature = "array_into_iter_constructors", issue = "91583")] #[inline] + #[requires(initialized.start <= initialized.end)] + #[requires(initialized.end <= N)] pub const unsafe fn new_unchecked( buffer: [MaybeUninit; N], initialized: Range, @@ -279,6 +285,8 @@ impl Iterator for IntoIter { } #[inline] + #[requires(idx < self.len())] + #[cfg_attr(kani, kani::modifies(self))] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item { // SAFETY: The caller must provide an idx that is in bound of the remainder. let elem_ref = unsafe { self.as_mut_slice().get_unchecked_mut(idx) }; diff --git a/library/core/src/char/mod.rs b/library/core/src/char/mod.rs index 82a3f6f916be3..eed3514716838 100644 --- a/library/core/src/char/mod.rs +++ b/library/core/src/char/mod.rs @@ -24,6 +24,8 @@ mod convert; mod decode; mod methods; +use safety::requires; + // stable re-exports #[rustfmt::skip] #[stable(feature = "try_from", since = "1.34.0")] @@ -47,6 +49,8 @@ use crate::error::Error; use crate::escape::{AlwaysEscaped, EscapeIterInner, MaybeEscaped}; use crate::fmt::{self, Write}; use crate::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce}; +#[cfg(kani)] +use crate::kani; use crate::num::NonZero; // UTF-8 ranges and tags for encoding characters @@ -138,6 +142,7 @@ pub const fn from_u32(i: u32) -> Option { #[rustc_const_stable(feature = "const_char_from_u32_unchecked", since = "1.81.0")] #[must_use] #[inline] +#[requires(i <= 0x10FFFF && (i < 0xD800 || i > 0xDFFF))] pub const unsafe fn from_u32_unchecked(i: u32) -> char { // SAFETY: the safety contract must be upheld by the caller. unsafe { self::convert::from_u32_unchecked(i) } @@ -399,6 +404,7 @@ macro_rules! 
casemappingiter_impls { self.0.advance_by(n) } + #[requires(idx < self.0.len())] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item { // SAFETY: just forwarding requirements to caller unsafe { self.0.__iterator_get_unchecked(idx) } @@ -533,6 +539,8 @@ impl Iterator for CaseMappingIter { self.0.advance_by(n) } + #[requires(idx < self.len())] + #[cfg_attr(kani, kani::modifies(self))] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item { // SAFETY: just forwarding requirements to caller unsafe { self.0.__iterator_get_unchecked(idx) } diff --git a/library/core/src/iter/adapters/cloned.rs b/library/core/src/iter/adapters/cloned.rs index aea6d64281aec..0f05260059880 100644 --- a/library/core/src/iter/adapters/cloned.rs +++ b/library/core/src/iter/adapters/cloned.rs @@ -1,8 +1,12 @@ use core::num::NonZero; +use safety::requires; + use crate::iter::adapters::zip::try_get_unchecked; use crate::iter::adapters::{SourceIter, TrustedRandomAccess, TrustedRandomAccessNoCoerce}; use crate::iter::{FusedIterator, InPlaceIterable, TrustedLen, UncheckedIterator}; +#[cfg(kani)] +use crate::kani; use crate::ops::Try; /// An iterator that clones the elements of an underlying iterator. @@ -61,6 +65,7 @@ where self.it.map(T::clone).fold(init, f) } + #[requires(idx < self.it.size_hint().0)] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> T where Self: TrustedRandomAccessNoCoerce, diff --git a/library/core/src/iter/adapters/copied.rs b/library/core/src/iter/adapters/copied.rs index 23e4e25ab5388..0b9da45acaa06 100644 --- a/library/core/src/iter/adapters/copied.rs +++ b/library/core/src/iter/adapters/copied.rs @@ -1,6 +1,10 @@ +use safety::requires; + use crate::iter::adapters::zip::try_get_unchecked; use crate::iter::adapters::{SourceIter, TrustedRandomAccess, TrustedRandomAccessNoCoerce}; use crate::iter::{FusedIterator, InPlaceIterable, TrustedLen}; +#[cfg(kani)] +use crate::kani; use crate::mem::{MaybeUninit, SizedTypeProperties}; use crate::num::NonZero; use crate::ops::Try; @@ -92,6 +96,7 @@ where self.it.advance_by(n) } + #[requires(idx < self.it.size_hint().0)] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> T where Self: TrustedRandomAccessNoCoerce, diff --git a/library/core/src/iter/adapters/enumerate.rs b/library/core/src/iter/adapters/enumerate.rs index f7b9f0b7a5e9d..e7e18d178031f 100644 --- a/library/core/src/iter/adapters/enumerate.rs +++ b/library/core/src/iter/adapters/enumerate.rs @@ -1,6 +1,10 @@ +use safety::requires; + use crate::iter::adapters::zip::try_get_unchecked; use crate::iter::adapters::{SourceIter, TrustedRandomAccess, TrustedRandomAccessNoCoerce}; use crate::iter::{FusedIterator, InPlaceIterable, TrustedFused, TrustedLen}; +#[cfg(kani)] +use crate::kani; use crate::num::NonZero; use crate::ops::Try; @@ -160,6 +164,8 @@ where #[rustc_inherit_overflow_checks] #[inline] + #[requires(idx < self.iter.size_hint().0)] + #[cfg_attr(kani, kani::modifies(self))] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> ::Item where Self: TrustedRandomAccessNoCoerce, diff --git a/library/core/src/iter/adapters/fuse.rs b/library/core/src/iter/adapters/fuse.rs index 0072a95e8dfe0..fcad6168d85cd 100644 --- a/library/core/src/iter/adapters/fuse.rs +++ b/library/core/src/iter/adapters/fuse.rs @@ -1,9 +1,13 @@ +use safety::requires; + use crate::intrinsics; use crate::iter::adapters::SourceIter; use crate::iter::adapters::zip::try_get_unchecked; use crate::iter::{ FusedIterator, TrustedFused, TrustedLen, TrustedRandomAccess, 
TrustedRandomAccessNoCoerce, }; +#[cfg(kani)] +use crate::kani; use crate::ops::Try; /// An iterator that yields `None` forever after the underlying iterator @@ -109,6 +113,8 @@ where } #[inline] + #[requires(self.iter.is_some() && idx < self.iter.as_ref().unwrap().size_hint().0)] + #[cfg_attr(kani, kani::modifies(self))] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item where Self: TrustedRandomAccessNoCoerce, diff --git a/library/core/src/iter/adapters/map.rs b/library/core/src/iter/adapters/map.rs index 007c2d5acc2d0..c738b8cda957b 100644 --- a/library/core/src/iter/adapters/map.rs +++ b/library/core/src/iter/adapters/map.rs @@ -1,7 +1,11 @@ +use safety::requires; + use crate::fmt; use crate::iter::adapters::zip::try_get_unchecked; use crate::iter::adapters::{SourceIter, TrustedRandomAccess, TrustedRandomAccessNoCoerce}; use crate::iter::{FusedIterator, InPlaceIterable, TrustedFused, TrustedLen, UncheckedIterator}; +#[cfg(kani)] +use crate::kani; use crate::num::NonZero; use crate::ops::Try; @@ -129,6 +133,7 @@ where } #[inline] + #[requires(idx < self.iter.size_hint().0)] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> B where Self: TrustedRandomAccessNoCoerce, diff --git a/library/core/src/iter/adapters/skip.rs b/library/core/src/iter/adapters/skip.rs index 55c4a7f14fbd6..ac3cc4c4f1152 100644 --- a/library/core/src/iter/adapters/skip.rs +++ b/library/core/src/iter/adapters/skip.rs @@ -1,3 +1,5 @@ +use safety::requires; + use crate::intrinsics::unlikely; use crate::iter::adapters::SourceIter; use crate::iter::adapters::zip::try_get_unchecked; @@ -5,6 +7,8 @@ use crate::iter::{ FusedIterator, InPlaceIterable, TrustedFused, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce, }; +#[cfg(kani)] +use crate::kani; use crate::num::NonZero; use crate::ops::{ControlFlow, Try}; @@ -158,6 +162,8 @@ where } #[doc(hidden)] + #[requires(idx + self.n < self.iter.size_hint().0)] + #[cfg_attr(kani, kani::modifies(self))] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item where Self: TrustedRandomAccessNoCoerce, diff --git a/library/core/src/iter/adapters/zip.rs b/library/core/src/iter/adapters/zip.rs index c5e199c30821d..e39f1535bd95d 100644 --- a/library/core/src/iter/adapters/zip.rs +++ b/library/core/src/iter/adapters/zip.rs @@ -1,8 +1,12 @@ +use safety::requires; + use crate::cmp; use crate::fmt::{self, Debug}; use crate::iter::{ FusedIterator, InPlaceIterable, SourceIter, TrustedFused, TrustedLen, UncheckedIterator, }; +#[cfg(kani)] +use crate::kani; use crate::num::NonZero; /// An iterator that iterates two other iterators simultaneously. 
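Earlier in this patch, `char::from_u32_unchecked` gains the precondition `i <= 0x10FFFF && (i < 0xD800 || i > 0xDFFF)`, i.e. `i` must be a Unicode scalar value. A hedged sketch of how that condition can be exercised in an ordinary Kani harness; the harness itself is illustrative and not part of the patch:

```
#[cfg(kani)]
#[kani::proof]
fn check_scalar_value_precondition() {
    let i: u32 = kani::any();
    // Mirror the contract: at most 0x10FFFF and outside the surrogate range.
    kani::assume(i <= 0x10FFFF && (i < 0xD800 || i > 0xDFFF));
    // Under that assumption the unchecked conversion must agree with the checked API.
    let c = unsafe { char::from_u32_unchecked(i) };
    assert_eq!(Some(c), char::from_u32(i));
}
```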
@@ -104,6 +108,8 @@ where } #[inline] + #[requires(idx < self.size_hint().0)] + #[cfg_attr(kani, kani::modifies(self))] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item where Self: TrustedRandomAccessNoCoerce, @@ -264,6 +270,7 @@ where } #[inline] + #[cfg_attr(kani, kani::modifies(self))] unsafe fn get_unchecked(&mut self, idx: usize) -> ::Item { let idx = self.index + idx; // SAFETY: the caller must uphold the contract for diff --git a/library/core/src/iter/range.rs b/library/core/src/iter/range.rs index 70a38eafaa443..79cc6873adc7d 100644 --- a/library/core/src/iter/range.rs +++ b/library/core/src/iter/range.rs @@ -933,6 +933,7 @@ impl Iterator for ops::Range { } #[inline] + #[requires(idx < self.size_hint().0)] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item where Self: TrustedRandomAccessNoCoerce, diff --git a/library/core/src/iter/traits/iterator.rs b/library/core/src/iter/traits/iterator.rs index 10f9d464f7d7f..b86926e0daf14 100644 --- a/library/core/src/iter/traits/iterator.rs +++ b/library/core/src/iter/traits/iterator.rs @@ -1,3 +1,5 @@ +use safety::requires; + use super::super::{ ArrayChunks, ByRefSized, Chain, Cloned, Copied, Cycle, Enumerate, Filter, FilterMap, FlatMap, Flatten, Fuse, Inspect, Intersperse, IntersperseWith, Map, MapWhile, MapWindows, Peekable, @@ -6,6 +8,8 @@ use super::super::{ }; use crate::array; use crate::cmp::{self, Ordering}; +#[cfg(kani)] +use crate::kani; use crate::num::NonZero; use crate::ops::{ChangeOutputType, ControlFlow, FromResidual, Residual, Try}; @@ -1709,6 +1713,7 @@ pub trait Iterator { /// ``` #[inline] #[unstable(feature = "iter_map_windows", reason = "recently added", issue = "87155")] + #[requires(N > 0)] fn map_windows(self, f: F) -> MapWindows where Self: Sized, diff --git a/library/core/src/range/iter.rs b/library/core/src/range/iter.rs index 1e261d8c1d93a..622a8a95d5cad 100644 --- a/library/core/src/range/iter.rs +++ b/library/core/src/range/iter.rs @@ -1,6 +1,10 @@ +use safety::requires; + use crate::iter::{ FusedIterator, Step, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce, TrustedStep, }; +#[cfg(kani)] +use crate::kani; use crate::num::NonZero; use crate::range::{Range, RangeFrom, RangeInclusive, legacy}; @@ -104,6 +108,8 @@ impl Iterator for IterRange { } #[inline] + #[requires(idx < self.size_hint().0)] + #[cfg_attr(kani, kani::modifies(self))] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item where Self: TrustedRandomAccessNoCoerce, diff --git a/library/core/src/slice/iter.rs b/library/core/src/slice/iter.rs index 22b78418d67ee..6f6366ff45386 100644 --- a/library/core/src/slice/iter.rs +++ b/library/core/src/slice/iter.rs @@ -3,6 +3,8 @@ #[macro_use] // import iterator! and forward_iterator! 
mod macros; +use safety::requires; + use super::{from_raw_parts, from_raw_parts_mut}; use crate::hint::assert_unchecked; use crate::iter::{ @@ -1455,6 +1457,7 @@ impl<'a, T> Iterator for Windows<'a, T> { } } + #[requires(idx < self.len())] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item { // SAFETY: since the caller guarantees that `i` is in bounds, // which means that `i` cannot overflow an `isize`, and the @@ -1612,6 +1615,7 @@ impl<'a, T> Iterator for Chunks<'a, T> { } } + #[requires(idx < self.len())] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item { let start = idx * self.chunk_size; // SAFETY: the caller guarantees that `i` is in bounds, @@ -1801,6 +1805,7 @@ impl<'a, T> Iterator for ChunksMut<'a, T> { } } + #[requires(idx < self.len())] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item { let start = idx * self.chunk_size; // SAFETY: see comments for `Chunks::__iterator_get_unchecked` and `self.v`. @@ -1999,6 +2004,7 @@ impl<'a, T> Iterator for ChunksExact<'a, T> { self.next_back() } + #[requires(idx < self.len())] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item { let start = idx * self.chunk_size; // SAFETY: mostly identical to `Chunks::__iterator_get_unchecked`. @@ -2160,6 +2166,7 @@ impl<'a, T> Iterator for ChunksExactMut<'a, T> { self.next_back() } + #[requires(idx < self.len())] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item { let start = idx * self.chunk_size; // SAFETY: see comments for `Chunks::__iterator_get_unchecked` and `self.v`. @@ -2466,6 +2473,7 @@ impl<'a, T> Iterator for RChunks<'a, T> { } } + #[requires(idx < self.len())] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item { let end = self.v.len() - idx * self.chunk_size; let start = match end.checked_sub(self.chunk_size) { @@ -2646,6 +2654,7 @@ impl<'a, T> Iterator for RChunksMut<'a, T> { } } + #[requires(idx < self.len())] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item { let end = self.v.len() - idx * self.chunk_size; let start = match end.checked_sub(self.chunk_size) { @@ -2839,6 +2848,7 @@ impl<'a, T> Iterator for RChunksExact<'a, T> { self.next_back() } + #[requires(idx < self.len())] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item { let end = self.v.len() - idx * self.chunk_size; let start = end - self.chunk_size; @@ -3005,6 +3015,7 @@ impl<'a, T> Iterator for RChunksExactMut<'a, T> { self.next_back() } + #[requires(idx < self.len())] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item { let end = self.v.len() - idx * self.chunk_size; let start = end - self.chunk_size; diff --git a/library/core/src/str/iter.rs b/library/core/src/str/iter.rs index d2985d8a18669..34e2ab79b9d34 100644 --- a/library/core/src/str/iter.rs +++ b/library/core/src/str/iter.rs @@ -1,5 +1,7 @@ //! Iterators for `str` methods. 
+use safety::requires; + use super::pattern::{DoubleEndedSearcher, Pattern, ReverseSearcher, Searcher}; use super::validations::{next_code_point, next_code_point_reverse}; use super::{ @@ -11,6 +13,8 @@ use crate::iter::{ Chain, Copied, Filter, FlatMap, Flatten, FusedIterator, Map, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce, }; +#[cfg(kani)] +use crate::kani; use crate::num::NonZero; use crate::ops::Try; use crate::slice::{self, Split as SliceSplit}; @@ -356,6 +360,7 @@ impl Iterator for Bytes<'_> { } #[inline] + #[requires(idx < self.0.len())] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> u8 { // SAFETY: the caller must uphold the safety contract // for `Iterator::__iterator_get_unchecked`. From ca5f7b8bb3f19c0153be07cef4f524864de03b42 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 6 Aug 2025 09:07:27 +0000 Subject: [PATCH 17/20] Merge subtree update for toolchain nightly-2025-08-01 (#441) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is an automated PR to merge library subtree updates from 2025-07-31 (rust-lang/rust@3048886e59c94470e726ecaaf2add7242510ac11) to 2025-08-01 (rust-lang/rust@adcb3d3b4cd3b7c4cde642f3ed537037f293738e), inclusive. This is a clean merge, no conflicts were detected. **Do not remove or edit the following annotations:** git-subtree-dir: library git-subtree-split: 158ca2483ba4729a8e005fb98daa83e7353e34b9 --------- Signed-off-by: xizheyin Signed-off-by: Ayush Singh Co-authored-by: Ralf Jung Co-authored-by: Marijn Schouten Co-authored-by: Josh Triplett Co-authored-by: Matthias Krüger <476013+matthiaskrgr@users.noreply.github.com> Co-authored-by: Folkert de Vries Co-authored-by: okaneco <47607823+okaneco@users.noreply.github.com> Co-authored-by: Folkert de Vries Co-authored-by: Sayantan Chakraborty <142906350+sayantn@users.noreply.github.com> Co-authored-by: nazo6 Co-authored-by: bors Co-authored-by: Jakub Beránek Co-authored-by: Orson Peters Co-authored-by: León Orell Valerian Liehr Co-authored-by: xizheyin Co-authored-by: Deadbeef Co-authored-by: René Kijewski Co-authored-by: Nik Revenco Co-authored-by: The Miri Cronjob Bot Co-authored-by: Tim (Theemathas) Chirananthavat Co-authored-by: Chris Denton Co-authored-by: Samuel Tardieu Co-authored-by: Travis Cross Co-authored-by: Amanieu d'Antras Co-authored-by: Oli Scherer Co-authored-by: David Mládek Co-authored-by: SunkenPotato <3.14radius02@gmail.com> Co-authored-by: Cameron Steffen Co-authored-by: Martin Ombura Jr <8682597+martinomburajr@users.noreply.github.com> Co-authored-by: Madhav Madhusoodanan Co-authored-by: sayantn Co-authored-by: Trevor Gross Co-authored-by: WANG Rui Co-authored-by: Luigi Sartor Piucco Co-authored-by: Julien THILLARD <54775010+supersurviveur@users.noreply.github.com> Co-authored-by: The rustc-josh-sync Cronjob Bot Co-authored-by: usamoi Co-authored-by: Rémy Rakic Co-authored-by: Nurzhan Sakén Co-authored-by: ltdk Co-authored-by: bjorn3 <17426603+bjorn3@users.noreply.github.com> Co-authored-by: Guillaume Gomez Co-authored-by: Evgenii Zheltonozhskii Co-authored-by: roblabla Co-authored-by: 许杰友 Jieyou Xu (Joe) <39484203+jieyouxu@users.noreply.github.com> Co-authored-by: Ayush Singh Co-authored-by: Kornel Co-authored-by: Jonas Platte Co-authored-by: Alex Crichton Co-authored-by: Zachary S Co-authored-by: Jeremy Smart Co-authored-by: Ivan Tadeu Ferreira Antunes Filho Co-authored-by: Alisa Sireneva Co-authored-by: Trevor Gross Co-authored-by: 
Jacob Pratt Co-authored-by: Scott McMurray Co-authored-by: xonx <119700621+xonx4l@users.noreply.github.com> Co-authored-by: Yosh Co-authored-by: joboet Co-authored-by: Stuart Cook Co-authored-by: Connor Tsui Co-authored-by: Aandreba Co-authored-by: Lucas Werkmeister Co-authored-by: Balt <59123926+balt-dev@users.noreply.github.com> Co-authored-by: Tsukasa OI Co-authored-by: gitbot Co-authored-by: Michael Tautschnig Co-authored-by: Felipe R. Monteiro --- library/alloc/src/collections/linked_list.rs | 54 +++++++- .../alloc/src/collections/vec_deque/mod.rs | 96 ++++++++++++-- library/alloc/src/vec/mod.rs | 122 ++++++++++++++++-- library/std_detect/src/detect/arch/riscv.rs | 3 + .../std_detect/src/detect/os/linux/riscv.rs | 8 +- library/std_detect/src/detect/os/riscv.rs | 2 +- library/std_detect/tests/cpu-detection.rs | 1 + library/test/src/lib.rs | 4 +- rust-toolchain.toml | 2 +- scripts/kani-std-analysis/log_parser.py | 25 ++-- scripts/run-kani.sh | 10 +- tool_config/kani-version.toml | 2 +- .../original/linked_list.rs | 54 +++++++- .../verified/linked_list.rs | 54 +++++++- .../linked_list.rs/original/linked_list.rs | 54 +++++++- .../linked_list.rs/verified/linked_list.rs | 54 +++++++- 16 files changed, 477 insertions(+), 68 deletions(-) diff --git a/library/alloc/src/collections/linked_list.rs b/library/alloc/src/collections/linked_list.rs index 70c344e49b76a..31dfe73fc7992 100644 --- a/library/alloc/src/collections/linked_list.rs +++ b/library/alloc/src/collections/linked_list.rs @@ -825,7 +825,7 @@ impl LinkedList { unsafe { self.tail.as_mut().map(|node| &mut node.as_mut().element) } } - /// Adds an element first in the list. + /// Adds an element to the front of the list. /// /// This operation should compute in *O*(1) time. /// @@ -844,11 +844,34 @@ impl LinkedList { /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn push_front(&mut self, elt: T) { + let _ = self.push_front_mut(elt); + } + + /// Adds an element to the front of the list, returning a reference to it. + /// + /// This operation should compute in *O*(1) time. + /// + /// # Examples + /// + /// ``` + /// #![feature(push_mut)] + /// use std::collections::LinkedList; + /// + /// let mut dl = LinkedList::from([1, 2, 3]); + /// + /// let ptr = dl.push_front_mut(2); + /// *ptr += 4; + /// assert_eq!(dl.front().unwrap(), &6); + /// ``` + #[unstable(feature = "push_mut", issue = "135974")] + #[must_use = "if you don't need a reference to the value, use `LinkedList::push_front` instead"] + pub fn push_front_mut(&mut self, elt: T) -> &mut T { let node = Box::new_in(Node::new(elt), &self.alloc); - let node_ptr = NonNull::from(Box::leak(node)); + let mut node_ptr = NonNull::from(Box::leak(node)); // SAFETY: node_ptr is a unique pointer to a node we boxed with self.alloc and leaked unsafe { self.push_front_node(node_ptr); + &mut node_ptr.as_mut().element } } @@ -876,7 +899,7 @@ impl LinkedList { self.pop_front_node().map(Node::into_element) } - /// Appends an element to the back of a list. + /// Adds an element to the back of the list. /// /// This operation should compute in *O*(1) time. /// @@ -893,11 +916,34 @@ impl LinkedList { #[stable(feature = "rust1", since = "1.0.0")] #[rustc_confusables("push", "append")] pub fn push_back(&mut self, elt: T) { + let _ = self.push_back_mut(elt); + } + + /// Adds an element to the back of the list, returning a reference to it. + /// + /// This operation should compute in *O*(1) time. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(push_mut)] + /// use std::collections::LinkedList; + /// + /// let mut dl = LinkedList::from([1, 2, 3]); + /// + /// let ptr = dl.push_back_mut(2); + /// *ptr += 4; + /// assert_eq!(dl.back().unwrap(), &6); + /// ``` + #[unstable(feature = "push_mut", issue = "135974")] + #[must_use = "if you don't need a reference to the value, use `LinkedList::push_back` instead"] + pub fn push_back_mut(&mut self, elt: T) -> &mut T { let node = Box::new_in(Node::new(elt), &self.alloc); - let node_ptr = NonNull::from(Box::leak(node)); + let mut node_ptr = NonNull::from(Box::leak(node)); // SAFETY: node_ptr is a unique pointer to a node we boxed with self.alloc and leaked unsafe { self.push_back_node(node_ptr); + &mut node_ptr.as_mut().element } } diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs index 208f0da6fa830..9d78149a41347 100644 --- a/library/alloc/src/collections/vec_deque/mod.rs +++ b/library/alloc/src/collections/vec_deque/mod.rs @@ -185,11 +185,16 @@ impl VecDeque { unsafe { ptr::read(self.ptr().add(off)) } } - /// Writes an element into the buffer, moving it. + /// Writes an element into the buffer, moving it and returning a pointer to it. + /// # Safety + /// + /// May only be called if `off < self.capacity()`. #[inline] - unsafe fn buffer_write(&mut self, off: usize, value: T) { + unsafe fn buffer_write(&mut self, off: usize, value: T) -> &mut T { unsafe { - ptr::write(self.ptr().add(off), value); + let ptr = self.ptr().add(off); + ptr::write(ptr, value); + &mut *ptr } } @@ -1891,16 +1896,34 @@ impl VecDeque { #[stable(feature = "rust1", since = "1.0.0")] #[track_caller] pub fn push_front(&mut self, value: T) { + let _ = self.push_front_mut(value); + } + + /// Prepends an element to the deque, returning a reference to it. + /// + /// # Examples + /// + /// ``` + /// #![feature(push_mut)] + /// use std::collections::VecDeque; + /// + /// let mut d = VecDeque::from([1, 2, 3]); + /// let x = d.push_front_mut(8); + /// *x -= 1; + /// assert_eq!(d.front(), Some(&7)); + /// ``` + #[unstable(feature = "push_mut", issue = "135974")] + #[track_caller] + #[must_use = "if you don't need a reference to the value, use `VecDeque::push_front` instead"] + pub fn push_front_mut(&mut self, value: T) -> &mut T { if self.is_full() { self.grow(); } self.head = self.wrap_sub(self.head, 1); self.len += 1; - - unsafe { - self.buffer_write(self.head, value); - } + // SAFETY: We know that self.head is within range of the deque. + unsafe { self.buffer_write(self.head, value) } } /// Appends an element to the back of the deque. @@ -1919,12 +1942,33 @@ impl VecDeque { #[rustc_confusables("push", "put", "append")] #[track_caller] pub fn push_back(&mut self, value: T) { + let _ = self.push_back_mut(value); + } + + /// Appends an element to the back of the deque, returning a reference to it. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(push_mut)] + /// use std::collections::VecDeque; + /// + /// let mut d = VecDeque::from([1, 2, 3]); + /// let x = d.push_back_mut(9); + /// *x += 1; + /// assert_eq!(d.back(), Some(&10)); + /// ``` + #[unstable(feature = "push_mut", issue = "135974")] + #[track_caller] + #[must_use = "if you don't need a reference to the value, use `VecDeque::push_back` instead"] + pub fn push_back_mut(&mut self, value: T) -> &mut T { if self.is_full() { self.grow(); } - unsafe { self.buffer_write(self.to_physical_idx(self.len), value) } + let len = self.len; self.len += 1; + unsafe { self.buffer_write(self.to_physical_idx(len), value) } } #[inline] @@ -2010,7 +2054,7 @@ impl VecDeque { /// /// # Panics /// - /// Panics if `index` is strictly greater than deque's length + /// Panics if `index` is strictly greater than the deque's length. /// /// # Examples /// @@ -2032,7 +2076,37 @@ impl VecDeque { #[stable(feature = "deque_extras_15", since = "1.5.0")] #[track_caller] pub fn insert(&mut self, index: usize, value: T) { + let _ = self.insert_mut(index, value); + } + + /// Inserts an element at `index` within the deque, shifting all elements + /// with indices greater than or equal to `index` towards the back, and + /// returning a reference to it. + /// + /// Element at index 0 is the front of the queue. + /// + /// # Panics + /// + /// Panics if `index` is strictly greater than the deque's length. + /// + /// # Examples + /// + /// ``` + /// #![feature(push_mut)] + /// use std::collections::VecDeque; + /// + /// let mut vec_deque = VecDeque::from([1, 2, 3]); + /// + /// let x = vec_deque.insert_mut(1, 5); + /// *x += 7; + /// assert_eq!(vec_deque, &[1, 12, 2, 3]); + /// ``` + #[unstable(feature = "push_mut", issue = "135974")] + #[track_caller] + #[must_use = "if you don't need a reference to the value, use `VecDeque::insert` instead"] + pub fn insert_mut(&mut self, index: usize, value: T) -> &mut T { assert!(index <= self.len(), "index out of bounds"); + if self.is_full() { self.grow(); } @@ -2045,16 +2119,16 @@ impl VecDeque { unsafe { // see `remove()` for explanation why this wrap_copy() call is safe. self.wrap_copy(self.to_physical_idx(index), self.to_physical_idx(index + 1), k); - self.buffer_write(self.to_physical_idx(index), value); self.len += 1; + self.buffer_write(self.to_physical_idx(index), value) } } else { let old_head = self.head; self.head = self.wrap_sub(self.head, 1); unsafe { self.wrap_copy(old_head, self.head, index); - self.buffer_write(self.to_physical_idx(index), value); self.len += 1; + self.buffer_write(self.to_physical_idx(index), value) } } } diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs index d69b31da8c27f..65db56710b51b 100644 --- a/library/alloc/src/vec/mod.rs +++ b/library/alloc/src/vec/mod.rs @@ -2046,6 +2046,38 @@ impl Vec { #[stable(feature = "rust1", since = "1.0.0")] #[track_caller] pub fn insert(&mut self, index: usize, element: T) { + let _ = self.insert_mut(index, element); + } + + /// Inserts an element at position `index` within the vector, shifting all + /// elements after it to the right, and returning a reference to the new + /// element. + /// + /// # Panics + /// + /// Panics if `index > len`. + /// + /// # Examples + /// + /// ``` + /// #![feature(push_mut)] + /// let mut vec = vec![1, 3, 5, 9]; + /// let x = vec.insert_mut(3, 6); + /// *x += 1; + /// assert_eq!(vec, [1, 3, 5, 7, 9]); + /// ``` + /// + /// # Time complexity + /// + /// Takes *O*([`Vec::len`]) time. 
All items after the insertion index must be + /// shifted to the right. In the worst case, all elements are shifted when + /// the insertion index is 0. + #[cfg(not(no_global_oom_handling))] + #[inline] + #[unstable(feature = "push_mut", issue = "135974")] + #[track_caller] + #[must_use = "if you don't need a reference to the value, use `Vec::insert` instead"] + pub fn insert_mut(&mut self, index: usize, element: T) -> &mut T { #[cold] #[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))] #[track_caller] @@ -2067,8 +2099,8 @@ impl Vec { unsafe { // infallible // The spot to put the new value + let p = self.as_mut_ptr().add(index); { - let p = self.as_mut_ptr().add(index); if index < len { // Shift everything over to make space. (Duplicating the // `index`th element into two consecutive places.) @@ -2079,6 +2111,7 @@ impl Vec { ptr::write(p, element); } self.set_len(len + 1); + &mut *p } } @@ -2486,18 +2519,7 @@ impl Vec { #[rustc_confusables("push_back", "put", "append")] #[track_caller] pub fn push(&mut self, value: T) { - // Inform codegen that the length does not change across grow_one(). - let len = self.len; - // This will panic or abort if we would allocate > isize::MAX bytes - // or if the length increment would overflow for zero-sized types. - if len == self.buf.capacity() { - self.buf.grow_one(); - } - unsafe { - let end = self.as_mut_ptr().add(len); - ptr::write(end, value); - self.len = len + 1; - } + let _ = self.push_mut(value); } /// Appends an element if there is sufficient spare capacity, otherwise an error is returned @@ -2538,6 +2560,77 @@ impl Vec { #[inline] #[unstable(feature = "vec_push_within_capacity", issue = "100486")] pub fn push_within_capacity(&mut self, value: T) -> Result<(), T> { + self.push_mut_within_capacity(value).map(|_| ()) + } + + /// Appends an element to the back of a collection, returning a reference to it. + /// + /// # Panics + /// + /// Panics if the new capacity exceeds `isize::MAX` _bytes_. + /// + /// # Examples + /// + /// ``` + /// #![feature(push_mut)] + /// + /// + /// let mut vec = vec![1, 2]; + /// let last = vec.push_mut(3); + /// assert_eq!(*last, 3); + /// assert_eq!(vec, [1, 2, 3]); + /// + /// let last = vec.push_mut(3); + /// *last += 1; + /// assert_eq!(vec, [1, 2, 3, 4]); + /// ``` + /// + /// # Time complexity + /// + /// Takes amortized *O*(1) time. If the vector's length would exceed its + /// capacity after the push, *O*(*capacity*) time is taken to copy the + /// vector's elements to a larger allocation. This expensive operation is + /// offset by the *capacity* *O*(1) insertions it allows. + #[cfg(not(no_global_oom_handling))] + #[inline] + #[unstable(feature = "push_mut", issue = "135974")] + #[track_caller] + #[must_use = "if you don't need a reference to the value, use `Vec::push` instead"] + pub fn push_mut(&mut self, value: T) -> &mut T { + // Inform codegen that the length does not change across grow_one(). + let len = self.len; + // This will panic or abort if we would allocate > isize::MAX bytes + // or if the length increment would overflow for zero-sized types. + if len == self.buf.capacity() { + self.buf.grow_one(); + } + unsafe { + let end = self.as_mut_ptr().add(len); + ptr::write(end, value); + self.len = len + 1; + // SAFETY: We just wrote a value to the pointer that will live the lifetime of the reference. + &mut *end + } + } + + /// Appends an element and returns a reference to it if there is sufficient spare capacity, + /// otherwise an error is returned with the element. 
+ /// + /// Unlike [`push_mut`] this method will not reallocate when there's insufficient capacity. + /// The caller should use [`reserve`] or [`try_reserve`] to ensure that there is enough capacity. + /// + /// [`push_mut`]: Vec::push_mut + /// [`reserve`]: Vec::reserve + /// [`try_reserve`]: Vec::try_reserve + /// + /// # Time complexity + /// + /// Takes *O*(1) time. + #[unstable(feature = "push_mut", issue = "135974")] + // #[unstable(feature = "vec_push_within_capacity", issue = "100486")] + #[inline] + #[must_use = "if you don't need a reference to the value, use `Vec::push_within_capacity` instead"] + pub fn push_mut_within_capacity(&mut self, value: T) -> Result<&mut T, T> { if self.len == self.buf.capacity() { return Err(value); } @@ -2545,8 +2638,9 @@ impl Vec { let end = self.as_mut_ptr().add(self.len); ptr::write(end, value); self.len += 1; + // SAFETY: We just wrote a value to the pointer that will live the lifetime of the reference. + Ok(&mut *end) } - Ok(()) } /// Removes the last element from a vector and returns it, or [`None`] if it diff --git a/library/std_detect/src/detect/arch/riscv.rs b/library/std_detect/src/detect/arch/riscv.rs index b86190d7bbf0c..1d21b1d485589 100644 --- a/library/std_detect/src/detect/arch/riscv.rs +++ b/library/std_detect/src/detect/arch/riscv.rs @@ -73,6 +73,7 @@ features! { /// * Zihintpause: `"zihintpause"` /// * Zihpm: `"zihpm"` /// * Zimop: `"zimop"` + /// * Zabha: `"zabha"` /// * Zacas: `"zacas"` /// * Zawrs: `"zawrs"` /// * Zfa: `"zfa"` @@ -195,6 +196,8 @@ features! { /// "Zaamo" Extension for Atomic Memory Operations @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zawrs: "zawrs"; /// "Zawrs" Extension for Wait-on-Reservation-Set Instructions + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zabha: "zabha"; + /// "Zabha" Extension for Byte and Halfword Atomic Memory Operations @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zacas: "zacas"; /// "Zacas" Extension for Atomic Compare-and-Swap (CAS) Instructions @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zam: "zam"; diff --git a/library/std_detect/src/detect/os/linux/riscv.rs b/library/std_detect/src/detect/os/linux/riscv.rs index dbb3664890e56..18f9f68ec67ef 100644 --- a/library/std_detect/src/detect/os/linux/riscv.rs +++ b/library/std_detect/src/detect/os/linux/riscv.rs @@ -10,13 +10,13 @@ use super::super::riscv::imply_features; use super::auxvec; use crate::detect::{Feature, bit, cache}; -// See +// See // for runtime status query constants. const PR_RISCV_V_GET_CONTROL: libc::c_int = 70; const PR_RISCV_V_VSTATE_CTRL_ON: libc::c_int = 2; const PR_RISCV_V_VSTATE_CTRL_CUR_MASK: libc::c_int = 3; -// See +// See // for riscv_hwprobe struct and hardware probing constants. #[repr(C)] @@ -98,6 +98,7 @@ const RISCV_HWPROBE_EXT_ZVFBFWMA: u64 = 1 << 54; const RISCV_HWPROBE_EXT_ZICBOM: u64 = 1 << 55; const RISCV_HWPROBE_EXT_ZAAMO: u64 = 1 << 56; const RISCV_HWPROBE_EXT_ZALRSC: u64 = 1 << 57; +const RISCV_HWPROBE_EXT_ZABHA: u64 = 1 << 58; const RISCV_HWPROBE_KEY_CPUPERF_0: i64 = 5; const RISCV_HWPROBE_MISALIGNED_FAST: u64 = 3; @@ -138,7 +139,7 @@ pub(crate) fn detect_features() -> cache::Initializer { // Use auxiliary vector to enable single-letter ISA extensions. 
// The values are part of the platform-specific [asm/hwcap.h][hwcap] // - // [hwcap]: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/arch/riscv/include/uapi/asm/hwcap.h?h=v6.15 + // [hwcap]: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/arch/riscv/include/uapi/asm/hwcap.h?h=v6.16 let auxv = auxvec::auxv().expect("read auxvec"); // should not fail on RISC-V platform let mut has_i = bit::test(auxv.hwcap, (b'i' - b'a').into()); #[allow(clippy::eq_op)] @@ -233,6 +234,7 @@ pub(crate) fn detect_features() -> cache::Initializer { enable_feature(Feature::zalrsc, test(RISCV_HWPROBE_EXT_ZALRSC)); enable_feature(Feature::zaamo, test(RISCV_HWPROBE_EXT_ZAAMO)); enable_feature(Feature::zawrs, test(RISCV_HWPROBE_EXT_ZAWRS)); + enable_feature(Feature::zabha, test(RISCV_HWPROBE_EXT_ZABHA)); enable_feature(Feature::zacas, test(RISCV_HWPROBE_EXT_ZACAS)); enable_feature(Feature::ztso, test(RISCV_HWPROBE_EXT_ZTSO)); diff --git a/library/std_detect/src/detect/os/riscv.rs b/library/std_detect/src/detect/os/riscv.rs index dc9a4036d86a1..c6acbd3525bd3 100644 --- a/library/std_detect/src/detect/os/riscv.rs +++ b/library/std_detect/src/detect/os/riscv.rs @@ -90,7 +90,7 @@ pub(crate) fn imply_features(mut value: cache::Initializer) -> cache::Initialize group!(zks == zbkb & zbkc & zbkx & zksed & zksh); group!(zk == zkn & zkr & zkt); - imply!(zacas => zaamo); + imply!(zabha | zacas => zaamo); group!(a == zalrsc & zaamo); group!(b == zba & zbb & zbs); diff --git a/library/std_detect/tests/cpu-detection.rs b/library/std_detect/tests/cpu-detection.rs index 5ad32d83237ce..0c4fa57f2b465 100644 --- a/library/std_detect/tests/cpu-detection.rs +++ b/library/std_detect/tests/cpu-detection.rs @@ -242,6 +242,7 @@ fn riscv_linux() { println!("zalrsc: {}", is_riscv_feature_detected!("zalrsc")); println!("zaamo: {}", is_riscv_feature_detected!("zaamo")); println!("zawrs: {}", is_riscv_feature_detected!("zawrs")); + println!("zabha: {}", is_riscv_feature_detected!("zabha")); println!("zacas: {}", is_riscv_feature_detected!("zacas")); println!("zam: {}", is_riscv_feature_detected!("zam")); println!("ztso: {}", is_riscv_feature_detected!("ztso")); diff --git a/library/test/src/lib.rs b/library/test/src/lib.rs index 7f56d1e362698..1190bb56b97a0 100644 --- a/library/test/src/lib.rs +++ b/library/test/src/lib.rs @@ -89,8 +89,8 @@ use options::RunStrategy; use test_result::*; use time::TestExecTime; -// Process exit code to be used to indicate test failures. -const ERROR_EXIT_CODE: i32 = 101; +/// Process exit code to be used to indicate test failures. +pub const ERROR_EXIT_CODE: i32 = 101; const SECONDARY_TEST_INVOKER_VAR: &str = "__RUST_TEST_INVOKE"; const SECONDARY_TEST_BENCH_BENCHMARKS_VAR: &str = "__RUST_TEST_BENCH_BENCHMARKS"; diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 23c9fac486534..b3b889ccf7063 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -2,5 +2,5 @@ # standard library we currently track. 
[toolchain] -channel = "nightly-2025-07-31" +channel = "nightly-2025-08-01" components = ["llvm-tools-preview", "rustc-dev", "rust-src", "rustfmt"] diff --git a/scripts/kani-std-analysis/log_parser.py b/scripts/kani-std-analysis/log_parser.py index f9103fa892a0f..32b7c7f9c1c03 100755 --- a/scripts/kani-std-analysis/log_parser.py +++ b/scripts/kani-std-analysis/log_parser.py @@ -137,7 +137,7 @@ def read_kani_list(kani_list_file, scanner_data): } elif len(fn_info.keys()) > 1: crates = list(fn_info.keys()) - target_safenesses = [e['target_safenesses'] + target_safenesses = [e['target_safeness'] for _, e in fn_info.items()] public_targets = [e['public_target'] for _, e in fn_info.items()] @@ -185,7 +185,7 @@ def read_kani_list(kani_list_file, scanner_data): if fn_info is not None: if len(fn_info.keys()) > 1: crates = list(fn_info.keys()) - target_safenesses = [e['target_safenesses'] + target_safenesses = [e['target_safeness'] for _, e in fn_info.items()] public_targets = [e['public_target'] for _, e in fn_info.items()] @@ -236,14 +236,19 @@ def init_entry( if crate is None: print(f'No autoharness info for {harness}', file=sys.stderr) else: - if autoharness_info.get(crate) is None: - print(f'No autoharness info for {crate}', - file=sys.stderr) + if isinstance(crate, list): + crate_list = crate else: - if autoharness_info[crate][harness] != 'ok': - print(f'Unexpected autoharness info for {harness} in {crate}', + crate_list = [crate] + for c in crate_list: + if autoharness_info.get(c) is None: + print(f'No autoharness info for {c}', file=sys.stderr) - del autoharness_info[crate][harness] + else: + if autoharness_info[c][harness] != 'ok': + print(f'Unexpected autoharness info for {harness} in {c}', + file=sys.stderr) + del autoharness_info[c][harness] autoharness_result = 'ok' else: fn = harness_map_entry['function'] @@ -304,7 +309,7 @@ def create_autoharness_result( } elif len(fn_info.keys()) > 1: crates = list(fn_info.keys()) - target_safenesses = [e['target_safenesses'] + target_safenesses = [e['target_safeness'] for _, e in fn_info.items()] public_targets = [e['public_target'] for _, e in fn_info.items()] @@ -505,7 +510,7 @@ def parse_log_lines( del active_threads[thread_id] thread_id = None elif property_match := property_pattern.match(line): - assert thread_id is not None + assert thread_id is not None, f'No known thread id at line {i}' active_threads[thread_id]['n_failed_properties'] = int( property_match.group(1)) active_threads[thread_id]['n_total_properties'] = int( diff --git a/scripts/run-kani.sh b/scripts/run-kani.sh index 7714a813cc0bb..f774a96f12ca8 100755 --- a/scripts/run-kani.sh +++ b/scripts/run-kani.sh @@ -182,7 +182,7 @@ get_kani_path() { # then update EXPECTED_JSON_FILE_VERSION. get_harnesses() { local kani_path="$1" - "$kani_path" list -Z list $unstable_args ./library --std --format json + "$kani_path" list $unstable_args ./library --std --format json local json_file_version=$(jq -r '.["file-version"]' "$WORK_DIR/kani-list.json") if [[ $json_file_version != $EXPECTED_JSON_FILE_VERSION ]]; then echo "Error: The JSON file-version in kani-list.json does not equal $EXPECTED_JSON_FILE_VERSION" @@ -313,17 +313,17 @@ main() { elif [[ "$run_command" == "list" ]]; then echo "Running Kani list command..." 
if [[ "$with_autoharness" == "true" ]]; then - "$kani_path" autoharness -Z autoharness --list -Z list $unstable_args --std ./library --format markdown + "$kani_path" autoharness -Z autoharness --list $unstable_args --std ./library --format markdown else - "$kani_path" list -Z list $unstable_args ./library --std --format markdown + "$kani_path" list $unstable_args ./library --std --format markdown fi elif [[ "$run_command" == "metrics" ]]; then local current_dir=$(pwd) echo "Running Kani list command..." if [[ "$with_autoharness" == "true" ]]; then - "$kani_path" autoharness -Z autoharness --list -Z list $unstable_args --std ./library --format json + "$kani_path" autoharness -Z autoharness --list $unstable_args --std ./library --format json else - "$kani_path" list -Z list $unstable_args ./library --std --format json + "$kani_path" list $unstable_args ./library --std --format json fi pushd scripts/kani-std-analysis echo "Running Kani's std-analysis command..." diff --git a/tool_config/kani-version.toml b/tool_config/kani-version.toml index a50a4f9395213..9b03a8a4dacf7 100644 --- a/tool_config/kani-version.toml +++ b/tool_config/kani-version.toml @@ -2,4 +2,4 @@ # incompatible with the verify-std repo. [kani] -commit = "f76e668b5f86daad11e51d78ab10998426da943b" +commit = "53a7a3b516c5cbe504378d4a208d07d49b5aa4ec" diff --git a/verifast-proofs/alloc/collections/linked_list.rs-negative/original/linked_list.rs b/verifast-proofs/alloc/collections/linked_list.rs-negative/original/linked_list.rs index 70c344e49b76a..31dfe73fc7992 100644 --- a/verifast-proofs/alloc/collections/linked_list.rs-negative/original/linked_list.rs +++ b/verifast-proofs/alloc/collections/linked_list.rs-negative/original/linked_list.rs @@ -825,7 +825,7 @@ impl LinkedList { unsafe { self.tail.as_mut().map(|node| &mut node.as_mut().element) } } - /// Adds an element first in the list. + /// Adds an element to the front of the list. /// /// This operation should compute in *O*(1) time. /// @@ -844,11 +844,34 @@ impl LinkedList { /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn push_front(&mut self, elt: T) { + let _ = self.push_front_mut(elt); + } + + /// Adds an element to the front of the list, returning a reference to it. + /// + /// This operation should compute in *O*(1) time. + /// + /// # Examples + /// + /// ``` + /// #![feature(push_mut)] + /// use std::collections::LinkedList; + /// + /// let mut dl = LinkedList::from([1, 2, 3]); + /// + /// let ptr = dl.push_front_mut(2); + /// *ptr += 4; + /// assert_eq!(dl.front().unwrap(), &6); + /// ``` + #[unstable(feature = "push_mut", issue = "135974")] + #[must_use = "if you don't need a reference to the value, use `LinkedList::push_front` instead"] + pub fn push_front_mut(&mut self, elt: T) -> &mut T { let node = Box::new_in(Node::new(elt), &self.alloc); - let node_ptr = NonNull::from(Box::leak(node)); + let mut node_ptr = NonNull::from(Box::leak(node)); // SAFETY: node_ptr is a unique pointer to a node we boxed with self.alloc and leaked unsafe { self.push_front_node(node_ptr); + &mut node_ptr.as_mut().element } } @@ -876,7 +899,7 @@ impl LinkedList { self.pop_front_node().map(Node::into_element) } - /// Appends an element to the back of a list. + /// Adds an element to the back of the list. /// /// This operation should compute in *O*(1) time. 
/// @@ -893,11 +916,34 @@ impl LinkedList { #[stable(feature = "rust1", since = "1.0.0")] #[rustc_confusables("push", "append")] pub fn push_back(&mut self, elt: T) { + let _ = self.push_back_mut(elt); + } + + /// Adds an element to the back of the list, returning a reference to it. + /// + /// This operation should compute in *O*(1) time. + /// + /// # Examples + /// + /// ``` + /// #![feature(push_mut)] + /// use std::collections::LinkedList; + /// + /// let mut dl = LinkedList::from([1, 2, 3]); + /// + /// let ptr = dl.push_back_mut(2); + /// *ptr += 4; + /// assert_eq!(dl.back().unwrap(), &6); + /// ``` + #[unstable(feature = "push_mut", issue = "135974")] + #[must_use = "if you don't need a reference to the value, use `LinkedList::push_back` instead"] + pub fn push_back_mut(&mut self, elt: T) -> &mut T { let node = Box::new_in(Node::new(elt), &self.alloc); - let node_ptr = NonNull::from(Box::leak(node)); + let mut node_ptr = NonNull::from(Box::leak(node)); // SAFETY: node_ptr is a unique pointer to a node we boxed with self.alloc and leaked unsafe { self.push_back_node(node_ptr); + &mut node_ptr.as_mut().element } } diff --git a/verifast-proofs/alloc/collections/linked_list.rs-negative/verified/linked_list.rs b/verifast-proofs/alloc/collections/linked_list.rs-negative/verified/linked_list.rs index 80eaadddd2e31..cd6c1c552696d 100644 --- a/verifast-proofs/alloc/collections/linked_list.rs-negative/verified/linked_list.rs +++ b/verifast-proofs/alloc/collections/linked_list.rs-negative/verified/linked_list.rs @@ -990,7 +990,7 @@ impl LinkedList { unsafe { self.tail.as_mut().map(|node| &mut node.as_mut().element) } } - /// Adds an element first in the list. + /// Adds an element to the front of the list. /// /// This operation should compute in *O*(1) time. /// @@ -1009,11 +1009,34 @@ impl LinkedList { /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn push_front(&mut self, elt: T) { + let _ = self.push_front_mut(elt); + } + + /// Adds an element to the front of the list, returning a reference to it. + /// + /// This operation should compute in *O*(1) time. + /// + /// # Examples + /// + /// ``` + /// #![feature(push_mut)] + /// use std::collections::LinkedList; + /// + /// let mut dl = LinkedList::from([1, 2, 3]); + /// + /// let ptr = dl.push_front_mut(2); + /// *ptr += 4; + /// assert_eq!(dl.front().unwrap(), &6); + /// ``` + #[unstable(feature = "push_mut", issue = "135974")] + #[must_use = "if you don't need a reference to the value, use `LinkedList::push_front` instead"] + pub fn push_front_mut(&mut self, elt: T) -> &mut T { let node = Box::new_in(Node::new(elt), &self.alloc); - let node_ptr = NonNull::from(Box::leak(node)); + let mut node_ptr = NonNull::from(Box::leak(node)); // SAFETY: node_ptr is a unique pointer to a node we boxed with self.alloc and leaked unsafe { self.push_front_node(node_ptr); + &mut node_ptr.as_mut().element } } @@ -1041,7 +1064,7 @@ impl LinkedList { self.pop_front_node().map(Node::into_element) } - /// Appends an element to the back of a list. + /// Adds an element to the back of the list. /// /// This operation should compute in *O*(1) time. /// @@ -1058,11 +1081,34 @@ impl LinkedList { #[stable(feature = "rust1", since = "1.0.0")] #[rustc_confusables("push", "append")] pub fn push_back(&mut self, elt: T) { + let _ = self.push_back_mut(elt); + } + + /// Adds an element to the back of the list, returning a reference to it. + /// + /// This operation should compute in *O*(1) time. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(push_mut)] + /// use std::collections::LinkedList; + /// + /// let mut dl = LinkedList::from([1, 2, 3]); + /// + /// let ptr = dl.push_back_mut(2); + /// *ptr += 4; + /// assert_eq!(dl.back().unwrap(), &6); + /// ``` + #[unstable(feature = "push_mut", issue = "135974")] + #[must_use = "if you don't need a reference to the value, use `LinkedList::push_back` instead"] + pub fn push_back_mut(&mut self, elt: T) -> &mut T { let node = Box::new_in(Node::new(elt), &self.alloc); - let node_ptr = NonNull::from(Box::leak(node)); + let mut node_ptr = NonNull::from(Box::leak(node)); // SAFETY: node_ptr is a unique pointer to a node we boxed with self.alloc and leaked unsafe { self.push_back_node(node_ptr); + &mut node_ptr.as_mut().element } } diff --git a/verifast-proofs/alloc/collections/linked_list.rs/original/linked_list.rs b/verifast-proofs/alloc/collections/linked_list.rs/original/linked_list.rs index 70c344e49b76a..31dfe73fc7992 100644 --- a/verifast-proofs/alloc/collections/linked_list.rs/original/linked_list.rs +++ b/verifast-proofs/alloc/collections/linked_list.rs/original/linked_list.rs @@ -825,7 +825,7 @@ impl LinkedList { unsafe { self.tail.as_mut().map(|node| &mut node.as_mut().element) } } - /// Adds an element first in the list. + /// Adds an element to the front of the list. /// /// This operation should compute in *O*(1) time. /// @@ -844,11 +844,34 @@ impl LinkedList { /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn push_front(&mut self, elt: T) { + let _ = self.push_front_mut(elt); + } + + /// Adds an element to the front of the list, returning a reference to it. + /// + /// This operation should compute in *O*(1) time. + /// + /// # Examples + /// + /// ``` + /// #![feature(push_mut)] + /// use std::collections::LinkedList; + /// + /// let mut dl = LinkedList::from([1, 2, 3]); + /// + /// let ptr = dl.push_front_mut(2); + /// *ptr += 4; + /// assert_eq!(dl.front().unwrap(), &6); + /// ``` + #[unstable(feature = "push_mut", issue = "135974")] + #[must_use = "if you don't need a reference to the value, use `LinkedList::push_front` instead"] + pub fn push_front_mut(&mut self, elt: T) -> &mut T { let node = Box::new_in(Node::new(elt), &self.alloc); - let node_ptr = NonNull::from(Box::leak(node)); + let mut node_ptr = NonNull::from(Box::leak(node)); // SAFETY: node_ptr is a unique pointer to a node we boxed with self.alloc and leaked unsafe { self.push_front_node(node_ptr); + &mut node_ptr.as_mut().element } } @@ -876,7 +899,7 @@ impl LinkedList { self.pop_front_node().map(Node::into_element) } - /// Appends an element to the back of a list. + /// Adds an element to the back of the list. /// /// This operation should compute in *O*(1) time. /// @@ -893,11 +916,34 @@ impl LinkedList { #[stable(feature = "rust1", since = "1.0.0")] #[rustc_confusables("push", "append")] pub fn push_back(&mut self, elt: T) { + let _ = self.push_back_mut(elt); + } + + /// Adds an element to the back of the list, returning a reference to it. + /// + /// This operation should compute in *O*(1) time. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(push_mut)] + /// use std::collections::LinkedList; + /// + /// let mut dl = LinkedList::from([1, 2, 3]); + /// + /// let ptr = dl.push_back_mut(2); + /// *ptr += 4; + /// assert_eq!(dl.back().unwrap(), &6); + /// ``` + #[unstable(feature = "push_mut", issue = "135974")] + #[must_use = "if you don't need a reference to the value, use `LinkedList::push_back` instead"] + pub fn push_back_mut(&mut self, elt: T) -> &mut T { let node = Box::new_in(Node::new(elt), &self.alloc); - let node_ptr = NonNull::from(Box::leak(node)); + let mut node_ptr = NonNull::from(Box::leak(node)); // SAFETY: node_ptr is a unique pointer to a node we boxed with self.alloc and leaked unsafe { self.push_back_node(node_ptr); + &mut node_ptr.as_mut().element } } diff --git a/verifast-proofs/alloc/collections/linked_list.rs/verified/linked_list.rs b/verifast-proofs/alloc/collections/linked_list.rs/verified/linked_list.rs index 8613e9b456571..286e12d9bf40f 100644 --- a/verifast-proofs/alloc/collections/linked_list.rs/verified/linked_list.rs +++ b/verifast-proofs/alloc/collections/linked_list.rs/verified/linked_list.rs @@ -990,7 +990,7 @@ impl LinkedList { unsafe { self.tail.as_mut().map(|node| &mut node.as_mut().element) } } - /// Adds an element first in the list. + /// Adds an element to the front of the list. /// /// This operation should compute in *O*(1) time. /// @@ -1009,11 +1009,34 @@ impl LinkedList { /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn push_front(&mut self, elt: T) { + let _ = self.push_front_mut(elt); + } + + /// Adds an element to the front of the list, returning a reference to it. + /// + /// This operation should compute in *O*(1) time. + /// + /// # Examples + /// + /// ``` + /// #![feature(push_mut)] + /// use std::collections::LinkedList; + /// + /// let mut dl = LinkedList::from([1, 2, 3]); + /// + /// let ptr = dl.push_front_mut(2); + /// *ptr += 4; + /// assert_eq!(dl.front().unwrap(), &6); + /// ``` + #[unstable(feature = "push_mut", issue = "135974")] + #[must_use = "if you don't need a reference to the value, use `LinkedList::push_front` instead"] + pub fn push_front_mut(&mut self, elt: T) -> &mut T { let node = Box::new_in(Node::new(elt), &self.alloc); - let node_ptr = NonNull::from(Box::leak(node)); + let mut node_ptr = NonNull::from(Box::leak(node)); // SAFETY: node_ptr is a unique pointer to a node we boxed with self.alloc and leaked unsafe { self.push_front_node(node_ptr); + &mut node_ptr.as_mut().element } } @@ -1041,7 +1064,7 @@ impl LinkedList { self.pop_front_node().map(Node::into_element) } - /// Appends an element to the back of a list. + /// Adds an element to the back of the list. /// /// This operation should compute in *O*(1) time. /// @@ -1058,11 +1081,34 @@ impl LinkedList { #[stable(feature = "rust1", since = "1.0.0")] #[rustc_confusables("push", "append")] pub fn push_back(&mut self, elt: T) { + let _ = self.push_back_mut(elt); + } + + /// Adds an element to the back of the list, returning a reference to it. + /// + /// This operation should compute in *O*(1) time. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(push_mut)] + /// use std::collections::LinkedList; + /// + /// let mut dl = LinkedList::from([1, 2, 3]); + /// + /// let ptr = dl.push_back_mut(2); + /// *ptr += 4; + /// assert_eq!(dl.back().unwrap(), &6); + /// ``` + #[unstable(feature = "push_mut", issue = "135974")] + #[must_use = "if you don't need a reference to the value, use `LinkedList::push_back` instead"] + pub fn push_back_mut(&mut self, elt: T) -> &mut T { let node = Box::new_in(Node::new(elt), &self.alloc); - let node_ptr = NonNull::from(Box::leak(node)); + let mut node_ptr = NonNull::from(Box::leak(node)); // SAFETY: node_ptr is a unique pointer to a node we boxed with self.alloc and leaked unsafe { self.push_back_node(node_ptr); + &mut node_ptr.as_mut().element } } From 8e9f2b7ffb43e7afab6852f21dd660240b1ca9aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafael=20S=C3=A1=20Menezes?= Date: Wed, 6 Aug 2025 13:37:29 +0100 Subject: [PATCH 18/20] Update ESBMC link (#448) The previous link for ESBMC is now dead, breaking the CI. This updates goto transcoder to use the latest version of ESBMC. --- scripts/run-goto-transcoder.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/run-goto-transcoder.sh b/scripts/run-goto-transcoder.sh index a662ca75a69f9..e11a012ed4556 100755 --- a/scripts/run-goto-transcoder.sh +++ b/scripts/run-goto-transcoder.sh @@ -10,7 +10,7 @@ supported_regex=$2 unsupported_regex=neg goto_transcoder_git=https://github.com/esbmc/goto-transcoder -esbmc_url=https://github.com/esbmc/esbmc/releases/download/nightly-39b012f9f7f7dad188708a9eaf4bbbc5faa3b4f7/esbmc-linux.zip +esbmc_url=https://github.com/esbmc/esbmc/releases/download/v7.10/esbmc-linux.zip ########## # SCRIPT # @@ -24,7 +24,7 @@ if [ ! -d "goto-transcoder" ]; then cd goto-transcoder wget $esbmc_url unzip esbmc-linux.zip - chmod +x ./linux-release/bin/esbmc + chmod +x ./bin/esbmc cd .. fi @@ -45,7 +45,7 @@ while IFS= read -r line; do fi echo "Running: goto-transcoder $contract $contract_folder/$line $contract.esbmc.goto" cargo run cbmc2esbmc ../$contract_folder/$line $contract.esbmc.goto - ./linux-release/bin/esbmc --cprover --function $contract --binary resources/library.goto $contract.esbmc.goto + ./bin/esbmc --cprover --function $contract --binary resources/library.goto $contract.esbmc.goto done < "_contracts.txt" rm "_contracts.txt" From 517e11e30b0259cd4ee6fe2fa712dd69de40b6fc Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 6 Aug 2025 15:48:38 +0000 Subject: [PATCH 19/20] Merge subtree update for toolchain nightly-2025-08-06 (#447) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is an automated PR to merge library subtree updates from 2025-08-01 (rust-lang/rust@adcb3d3b4cd3b7c4cde642f3ed537037f293738e) to 2025-08-06 (rust-lang/rust@ec7c02612527d185c379900b613311bc1dcbf7dc) (inclusive) into main. `git merge` resulted in conflicts, which require manual resolution. Files were commited with merge conflict markers. 
**Do not remove or edit the following annotations:** git-subtree-dir: library git-subtree-split: 7112d068f717b1331c443aad87fd643e3399c1d5 --------- Signed-off-by: Ayush Singh Co-authored-by: Folkert de Vries Co-authored-by: Matthias Krüger <476013+matthiaskrgr@users.noreply.github.com> Co-authored-by: Deadbeef Co-authored-by: bors Co-authored-by: René Kijewski Co-authored-by: Nik Revenco Co-authored-by: The Miri Cronjob Bot Co-authored-by: Tim (Theemathas) Chirananthavat Co-authored-by: Jakub Beránek Co-authored-by: Chris Denton Co-authored-by: Samuel Tardieu Co-authored-by: Travis Cross Co-authored-by: Amanieu d'Antras Co-authored-by: Oli Scherer Co-authored-by: David Mládek Co-authored-by: SunkenPotato <3.14radius02@gmail.com> Co-authored-by: Ralf Jung Co-authored-by: Cameron Steffen Co-authored-by: Martin Ombura Jr <8682597+martinomburajr@users.noreply.github.com> Co-authored-by: Madhav Madhusoodanan Co-authored-by: sayantn Co-authored-by: León Orell Valerian Liehr Co-authored-by: Trevor Gross Co-authored-by: WANG Rui Co-authored-by: Folkert de Vries Co-authored-by: Luigi Sartor Piucco Co-authored-by: Julien THILLARD <54775010+supersurviveur@users.noreply.github.com> Co-authored-by: The rustc-josh-sync Cronjob Bot Co-authored-by: Marijn Schouten Co-authored-by: usamoi Co-authored-by: Rémy Rakic Co-authored-by: Alisa Sireneva Co-authored-by: Josh Triplett Co-authored-by: Nurzhan Sakén Co-authored-by: ltdk Co-authored-by: bjorn3 <17426603+bjorn3@users.noreply.github.com> Co-authored-by: Guillaume Gomez Co-authored-by: Evgenii Zheltonozhskii Co-authored-by: roblabla Co-authored-by: 许杰友 Jieyou Xu (Joe) <39484203+jieyouxu@users.noreply.github.com> Co-authored-by: Ayush Singh Co-authored-by: Kornel Co-authored-by: Jonas Platte Co-authored-by: Alex Crichton Co-authored-by: Zachary S Co-authored-by: Jeremy Smart Co-authored-by: Ivan Tadeu Ferreira Antunes Filho Co-authored-by: okaneco <47607823+okaneco@users.noreply.github.com> Co-authored-by: Trevor Gross Co-authored-by: Jacob Pratt Co-authored-by: Scott McMurray Co-authored-by: xonx <119700621+xonx4l@users.noreply.github.com> Co-authored-by: Yosh Co-authored-by: joboet Co-authored-by: Stuart Cook Co-authored-by: Connor Tsui Co-authored-by: Aandreba Co-authored-by: Lucas Werkmeister Co-authored-by: Orson Peters Co-authored-by: Balt <59123926+balt-dev@users.noreply.github.com> Co-authored-by: Tsukasa OI Co-authored-by: Nico Lehmann Co-authored-by: stifskere Co-authored-by: Kivooeo Co-authored-by: gitbot Co-authored-by: Michael Tautschnig Co-authored-by: Felipe R. 
Monteiro --- library/Cargo.lock | 23 ++-- library/alloc/src/string.rs | 4 +- library/alloc/src/vec/mod.rs | 10 +- library/core/src/array/equality.rs | 28 ++--- library/core/src/cell.rs | 6 +- library/core/src/cell/lazy.rs | 61 ++++++++- library/core/src/hint.rs | 2 - library/core/src/iter/mod.rs | 6 +- library/core/src/iter/traits/iterator.rs | 2 +- library/core/src/macros/mod.rs | 10 +- library/core/src/marker.rs | 3 +- library/core/src/mem/maybe_uninit.rs | 21 ++-- library/core/src/num/niche_types.rs | 6 +- library/core/src/primitive_docs.rs | 2 +- library/core/src/ptr/alignment.rs | 8 +- library/core/src/result.rs | 139 +++++++++++++++++---- library/core/src/slice/mod.rs | 3 +- library/coretests/tests/lib.rs | 2 + library/coretests/tests/result.rs | 83 ++++++++++++ library/panic_abort/Cargo.toml | 3 +- library/panic_unwind/Cargo.toml | 5 +- library/rustc-std-workspace-core/README.md | 6 + library/std/Cargo.toml | 1 - library/std/src/lib.rs | 2 + library/std/src/macros.rs | 74 +++++++++++ library/std/src/panic.rs | 2 +- library/std/src/panicking.rs | 14 +-- library/std/src/path.rs | 5 +- library/std/src/sync/lazy_lock.rs | 60 ++++++++- library/std/src/sync/once_lock.rs | 2 + library/std/src/sync/poison.rs | 24 ++-- library/std/src/sync/poison/mutex.rs | 70 +++++++++-- library/std/src/sync/poison/once.rs | 6 +- library/std/src/sync/poison/rwlock.rs | 10 +- library/std/src/sys/pal/uefi/helpers.rs | 22 ++-- library/std/src/thread/mod.rs | 3 + library/unwind/Cargo.toml | 3 +- rust-toolchain.toml | 2 +- tool_config/kani-version.toml | 2 +- 39 files changed, 581 insertions(+), 154 deletions(-) diff --git a/library/Cargo.lock b/library/Cargo.lock index a9a611fe1ed56..09228825086ee 100644 --- a/library/Cargo.lock +++ b/library/Cargo.lock @@ -169,9 +169,9 @@ dependencies = [ [[package]] name = "object" -version = "0.37.1" +version = "0.37.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03fd943161069e1768b4b3d050890ba48730e590f57e56d4aa04e7e090e61b4a" +checksum = "b3e3d0a7419f081f4a808147e845310313a39f322d7ae1f996b7f001d6cbed04" dependencies = [ "memchr", "rustc-std-workspace-alloc", @@ -183,9 +183,8 @@ name = "panic_abort" version = "0.0.0" dependencies = [ "alloc", - "compiler_builtins", - "core", "libc", + "rustc-std-workspace-core", ] [[package]] @@ -194,9 +193,8 @@ version = "0.0.0" dependencies = [ "alloc", "cfg-if", - "compiler_builtins", - "core", "libc", + "rustc-std-workspace-core", "unwind", ] @@ -261,9 +259,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.25" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "989e6739f80c4ad5b13e0fd7fe89531180375b18520cc8c82080e4dc4035b84f" +checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" dependencies = [ "rustc-std-workspace-core", ] @@ -313,7 +311,6 @@ dependencies = [ "addr2line", "alloc", "cfg-if", - "compiler_builtins", "core", "dlmalloc", "fortanix-sgx-abi", @@ -380,19 +377,17 @@ name = "unwind" version = "0.0.0" dependencies = [ "cfg-if", - "compiler_builtins", - "core", "libc", + "rustc-std-workspace-core", "unwinding", ] [[package]] name = "unwinding" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d80f6c2bfede213d9a90b4a14f3eb99b84e33c52df6c1a15de0a100f5a88751" +checksum = "60612c845ef41699f39dc8c5391f252942c0a88b7d15da672eff0d14101bbd6d" dependencies = [ - "compiler_builtins", "gimli", "rustc-std-workspace-core", ] diff --git 
a/library/alloc/src/string.rs b/library/alloc/src/string.rs index d58240f3051e0..9eacbf00e4233 100644 --- a/library/alloc/src/string.rs +++ b/library/alloc/src/string.rs @@ -265,12 +265,12 @@ use crate::vec::{self, Vec}; /// You can look at these with the [`as_ptr`], [`len`], and [`capacity`] /// methods: /// +// FIXME Update this when vec_into_raw_parts is stabilized /// ``` /// use std::mem; /// /// let story = String::from("Once upon a time..."); /// -// FIXME Update this when vec_into_raw_parts is stabilized /// // Prevent automatically dropping the String's data /// let mut story = mem::ManuallyDrop::new(story); /// @@ -970,13 +970,13 @@ impl String { /// /// # Examples /// + // FIXME Update this when vec_into_raw_parts is stabilized /// ``` /// use std::mem; /// /// unsafe { /// let s = String::from("hello"); /// - // FIXME Update this when vec_into_raw_parts is stabilized /// // Prevent automatically dropping the String's data /// let mut s = mem::ManuallyDrop::new(s); /// diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs index 65db56710b51b..fe672d14683df 100644 --- a/library/alloc/src/vec/mod.rs +++ b/library/alloc/src/vec/mod.rs @@ -566,13 +566,13 @@ impl Vec { /// /// # Examples /// + // FIXME Update this when vec_into_raw_parts is stabilized /// ``` /// use std::ptr; /// use std::mem; /// /// let v = vec![1, 2, 3]; /// - // FIXME Update this when vec_into_raw_parts is stabilized /// // Prevent running `v`'s destructor so we are in complete control /// // of the allocation. /// let mut v = mem::ManuallyDrop::new(v); @@ -674,6 +674,7 @@ impl Vec { /// /// # Examples /// + // FIXME Update this when vec_into_raw_parts is stabilized /// ``` /// #![feature(box_vec_non_null)] /// @@ -682,7 +683,6 @@ impl Vec { /// /// let v = vec![1, 2, 3]; /// - // FIXME Update this when vec_into_raw_parts is stabilized /// // Prevent running `v`'s destructor so we are in complete control /// // of the allocation. /// let mut v = mem::ManuallyDrop::new(v); @@ -994,6 +994,7 @@ impl Vec { /// /// # Examples /// + // FIXME Update this when vec_into_raw_parts is stabilized /// ``` /// #![feature(allocator_api)] /// @@ -1007,7 +1008,6 @@ impl Vec { /// v.push(2); /// v.push(3); /// - // FIXME Update this when vec_into_raw_parts is stabilized /// // Prevent running `v`'s destructor so we are in complete control /// // of the allocation. /// let mut v = mem::ManuallyDrop::new(v); @@ -1114,6 +1114,7 @@ impl Vec { /// /// # Examples /// + // FIXME Update this when vec_into_raw_parts is stabilized /// ``` /// #![feature(allocator_api, box_vec_non_null)] /// @@ -1127,7 +1128,6 @@ impl Vec { /// v.push(2); /// v.push(3); /// - // FIXME Update this when vec_into_raw_parts is stabilized /// // Prevent running `v`'s destructor so we are in complete control /// // of the allocation. 
/// let mut v = mem::ManuallyDrop::new(v); @@ -3872,8 +3872,8 @@ impl Vec { /// while i < vec.len() - end_items { /// if some_predicate(&mut vec[i]) { /// let val = vec.remove(i); - /// # extracted.push(val); /// // your code here + /// # extracted.push(val); /// } else { /// i += 1; /// } diff --git a/library/core/src/array/equality.rs b/library/core/src/array/equality.rs index bb668d2a67309..1ad2cca64a347 100644 --- a/library/core/src/array/equality.rs +++ b/library/core/src/array/equality.rs @@ -22,18 +22,16 @@ where { #[inline] fn eq(&self, other: &[U]) -> bool { - let b: Result<&[U; N], _> = other.try_into(); - match b { - Ok(b) => *self == *b, - Err(_) => false, + match other.as_array::() { + Some(b) => *self == *b, + None => false, } } #[inline] fn ne(&self, other: &[U]) -> bool { - let b: Result<&[U; N], _> = other.try_into(); - match b { - Ok(b) => *self != *b, - Err(_) => true, + match other.as_array::() { + Some(b) => *self != *b, + None => true, } } } @@ -45,18 +43,16 @@ where { #[inline] fn eq(&self, other: &[U; N]) -> bool { - let b: Result<&[T; N], _> = self.try_into(); - match b { - Ok(b) => *b == *other, - Err(_) => false, + match self.as_array::() { + Some(b) => *b == *other, + None => false, } } #[inline] fn ne(&self, other: &[U; N]) -> bool { - let b: Result<&[T; N], _> = self.try_into(); - match b { - Ok(b) => *b != *other, - Err(_) => true, + match self.as_array::() { + Some(b) => *b != *other, + None => true, } } } diff --git a/library/core/src/cell.rs b/library/core/src/cell.rs index d6d1bf2effae2..d67408cae1b95 100644 --- a/library/core/src/cell.rs +++ b/library/core/src/cell.rs @@ -2068,9 +2068,9 @@ impl fmt::Display for RefMut<'_, T> { /// implies exclusive access to its `T`: /// /// ```rust -/// #![forbid(unsafe_code)] // with exclusive accesses, -/// // `UnsafeCell` is a transparent no-op wrapper, -/// // so no need for `unsafe` here. +/// #![forbid(unsafe_code)] +/// // with exclusive accesses, `UnsafeCell` is a transparent no-op wrapper, so no need for +/// // `unsafe` here. /// use std::cell::UnsafeCell; /// /// let mut x: UnsafeCell = 42.into(); diff --git a/library/core/src/cell/lazy.rs b/library/core/src/cell/lazy.rs index 1758e84ad7cdf..a1bd4c8571706 100644 --- a/library/core/src/cell/lazy.rs +++ b/library/core/src/cell/lazy.rs @@ -15,6 +15,22 @@ enum State { /// /// [`std::sync::LazyLock`]: ../../std/sync/struct.LazyLock.html /// +/// # Poisoning +/// +/// If the initialization closure passed to [`LazyCell::new`] panics, the cell will be poisoned. +/// Once the cell is poisoned, any threads that attempt to access this cell (via a dereference +/// or via an explicit call to [`force()`]) will panic. +/// +/// This concept is similar to that of poisoning in the [`std::sync::poison`] module. A key +/// difference, however, is that poisoning in `LazyCell` is _unrecoverable_. All future accesses of +/// the cell from other threads will panic, whereas a type in [`std::sync::poison`] like +/// [`std::sync::poison::Mutex`] allows recovery via [`PoisonError::into_inner()`]. +/// +/// [`force()`]: LazyCell::force +/// [`std::sync::poison`]: ../../std/sync/poison/index.html +/// [`std::sync::poison::Mutex`]: ../../std/sync/poison/struct.Mutex.html +/// [`PoisonError::into_inner()`]: ../../std/sync/poison/struct.PoisonError.html#method.into_inner +/// /// # Examples /// /// ``` @@ -64,6 +80,10 @@ impl T> LazyCell { /// /// Returns `Ok(value)` if `Lazy` is initialized and `Err(f)` otherwise. /// + /// # Panics + /// + /// Panics if the cell is poisoned. 
+ /// /// # Examples /// /// ``` @@ -93,6 +113,15 @@ impl T> LazyCell { /// /// This is equivalent to the `Deref` impl, but is explicit. /// + /// # Panics + /// + /// If the initialization closure panics (the one that is passed to the [`new()`] method), the + /// panic is propagated to the caller, and the cell becomes poisoned. This will cause all future + /// accesses of the cell (via [`force()`] or a dereference) to panic. + /// + /// [`new()`]: LazyCell::new + /// [`force()`]: LazyCell::force + /// /// # Examples /// /// ``` @@ -123,6 +152,15 @@ impl T> LazyCell { /// Forces the evaluation of this lazy value and returns a mutable reference to /// the result. /// + /// # Panics + /// + /// If the initialization closure panics (the one that is passed to the [`new()`] method), the + /// panic is propagated to the caller, and the cell becomes poisoned. This will cause all future + /// accesses of the cell (via [`force()`] or a dereference) to panic. + /// + /// [`new()`]: LazyCell::new + /// [`force()`]: LazyCell::force + /// /// # Examples /// /// ``` @@ -219,7 +257,8 @@ impl T> LazyCell { } impl LazyCell { - /// Returns a mutable reference to the value if initialized, or `None` if not. + /// Returns a mutable reference to the value if initialized. Otherwise (if uninitialized or + /// poisoned), returns `None`. /// /// # Examples /// @@ -245,7 +284,8 @@ impl LazyCell { } } - /// Returns a reference to the value if initialized, or `None` if not. + /// Returns a reference to the value if initialized. Otherwise (if uninitialized or poisoned), + /// returns `None`. /// /// # Examples /// @@ -278,6 +318,15 @@ impl LazyCell { #[stable(feature = "lazy_cell", since = "1.80.0")] impl T> Deref for LazyCell { type Target = T; + + /// # Panics + /// + /// If the initialization closure panics (the one that is passed to the [`new()`] method), the + /// panic is propagated to the caller, and the cell becomes poisoned. This will cause all future + /// accesses of the cell (via [`force()`] or a dereference) to panic. + /// + /// [`new()`]: LazyCell::new + /// [`force()`]: LazyCell::force #[inline] fn deref(&self) -> &T { LazyCell::force(self) @@ -286,6 +335,14 @@ impl T> Deref for LazyCell { #[stable(feature = "lazy_deref_mut", since = "1.89.0")] impl T> DerefMut for LazyCell { + /// # Panics + /// + /// If the initialization closure panics (the one that is passed to the [`new()`] method), the + /// panic is propagated to the caller, and the cell becomes poisoned. This will cause all future + /// accesses of the cell (via [`force()`] or a dereference) to panic. + /// + /// [`new()`]: LazyCell::new + /// [`force()`]: LazyCell::force #[inline] fn deref_mut(&mut self) -> &mut T { LazyCell::force_mut(self) diff --git a/library/core/src/hint.rs b/library/core/src/hint.rs index 696d323c66d2e..c72eeb9a9c976 100644 --- a/library/core/src/hint.rs +++ b/library/core/src/hint.rs @@ -649,8 +649,6 @@ pub const fn must_use(value: T) -> T { /// } /// } /// ``` -/// -/// #[unstable(feature = "likely_unlikely", issue = "136873")] #[inline(always)] pub const fn likely(b: bool) -> bool { diff --git a/library/core/src/iter/mod.rs b/library/core/src/iter/mod.rs index 7ca41d224a0a6..56ca1305b601b 100644 --- a/library/core/src/iter/mod.rs +++ b/library/core/src/iter/mod.rs @@ -233,10 +233,12 @@ //! //! ``` //! let mut values = vec![41]; -//! for x in &mut values { // same as `values.iter_mut()` +//! for x in &mut values { +//! // ^ same as `values.iter_mut()` //! *x += 1; //! } -//! 
for x in &values { // same as `values.iter()` +//! for x in &values { +//! // ^ same as `values.iter()` //! assert_eq!(*x, 42); //! } //! assert_eq!(values.len(), 1); diff --git a/library/core/src/iter/traits/iterator.rs b/library/core/src/iter/traits/iterator.rs index b86926e0daf14..c38579b1512fc 100644 --- a/library/core/src/iter/traits/iterator.rs +++ b/library/core/src/iter/traits/iterator.rs @@ -811,7 +811,7 @@ pub trait Iterator { /// might be preferable to keep a functional style with longer iterators: /// /// ``` - /// (0..5).flat_map(|x| x * 100 .. x * 110) + /// (0..5).flat_map(|x| (x * 100)..(x * 110)) /// .enumerate() /// .filter(|&(i, x)| (i + x) % 3 == 0) /// .for_each(|(i, x)| println!("{i}:{x}")); diff --git a/library/core/src/macros/mod.rs b/library/core/src/macros/mod.rs index 3d57da63683b2..c59290a757b67 100644 --- a/library/core/src/macros/mod.rs +++ b/library/core/src/macros/mod.rs @@ -271,7 +271,10 @@ pub macro cfg_select($($tt:tt)*) { /// // expression given. /// debug_assert!(true); /// -/// fn some_expensive_computation() -> bool { true } // a very simple function +/// fn some_expensive_computation() -> bool { +/// // Some expensive computation here +/// true +/// } /// debug_assert!(some_expensive_computation()); /// /// // assert with a custom message @@ -1547,7 +1550,10 @@ pub(crate) mod builtin { /// // expression given. /// assert!(true); /// - /// fn some_computation() -> bool { true } // a very simple function + /// fn some_computation() -> bool { + /// // Some expensive computation here + /// true + /// } /// /// assert!(some_computation()); /// diff --git a/library/core/src/marker.rs b/library/core/src/marker.rs index 45277a1c82d3a..ba00ee17b6528 100644 --- a/library/core/src/marker.rs +++ b/library/core/src/marker.rs @@ -138,8 +138,7 @@ unsafe impl Send for &T {} /// impl Bar for Impl { } /// /// let x: &dyn Foo = &Impl; // OK -/// // let y: &dyn Bar = &Impl; // error: the trait `Bar` cannot -/// // be made into an object +/// // let y: &dyn Bar = &Impl; // error: the trait `Bar` cannot be made into an object /// ``` /// /// [trait object]: ../../book/ch17-02-trait-objects.html diff --git a/library/core/src/mem/maybe_uninit.rs b/library/core/src/mem/maybe_uninit.rs index 34d8370da7ecb..c160360cfacf9 100644 --- a/library/core/src/mem/maybe_uninit.rs +++ b/library/core/src/mem/maybe_uninit.rs @@ -773,8 +773,7 @@ impl MaybeUninit { /// // Initialize the `MaybeUninit` using `Cell::set`: /// unsafe { /// b.assume_init_ref().set(true); - /// // ^^^^^^^^^^^^^^^ - /// // Reference to an uninitialized `Cell`: UB! + /// //^^^^^^^^^^^^^^^ Reference to an uninitialized `Cell`: UB! /// } /// ``` #[stable(feature = "maybe_uninit_ref", since = "1.55.0")] @@ -864,9 +863,9 @@ impl MaybeUninit { /// { /// let mut buffer = MaybeUninit::<[u8; 64]>::uninit(); /// reader.read_exact(unsafe { buffer.assume_init_mut() })?; - /// // ^^^^^^^^^^^^^^^^^^^^^^^^ - /// // (mutable) reference to uninitialized memory! - /// // This is undefined behavior. + /// // ^^^^^^^^^^^^^^^^^^^^^^^^ + /// // (mutable) reference to uninitialized memory! + /// // This is undefined behavior. /// Ok(unsafe { buffer.assume_init() }) /// } /// ``` @@ -884,13 +883,13 @@ impl MaybeUninit { /// let foo: Foo = unsafe { /// let mut foo = MaybeUninit::::uninit(); /// ptr::write(&mut foo.assume_init_mut().a as *mut u32, 1337); - /// // ^^^^^^^^^^^^^^^^^^^^^ - /// // (mutable) reference to uninitialized memory! - /// // This is undefined behavior. 
+ /// // ^^^^^^^^^^^^^^^^^^^^^ + /// // (mutable) reference to uninitialized memory! + /// // This is undefined behavior. /// ptr::write(&mut foo.assume_init_mut().b as *mut u8, 42); - /// // ^^^^^^^^^^^^^^^^^^^^^ - /// // (mutable) reference to uninitialized memory! - /// // This is undefined behavior. + /// // ^^^^^^^^^^^^^^^^^^^^^ + /// // (mutable) reference to uninitialized memory! + /// // This is undefined behavior. /// foo.assume_init() /// }; /// ``` diff --git a/library/core/src/num/niche_types.rs b/library/core/src/num/niche_types.rs index b92561c9e356d..d57b1d433e51c 100644 --- a/library/core/src/num/niche_types.rs +++ b/library/core/src/num/niche_types.rs @@ -46,11 +46,11 @@ macro_rules! define_valid_range_type { /// primitive without checking whether its zero. /// /// # Safety - /// Immediate language UB if `val == 0`, as it violates the validity - /// invariant of this type. + /// Immediate language UB if `val` is not within the valid range for this + /// type, as it violates the validity invariant. #[inline] pub const unsafe fn new_unchecked(val: $int) -> Self { - // SAFETY: Caller promised that `val` is non-zero. + // SAFETY: Caller promised that `val` is within the valid range. unsafe { $name(val) } } diff --git a/library/core/src/primitive_docs.rs b/library/core/src/primitive_docs.rs index 9a1ba7d1728cb..1c824e336bed7 100644 --- a/library/core/src/primitive_docs.rs +++ b/library/core/src/primitive_docs.rs @@ -271,6 +271,7 @@ mod prim_bool {} /// When the compiler sees a value of type `!` in a [coercion site], it implicitly inserts a /// coercion to allow the type checker to infer any type: /// +// FIXME: use `core::convert::absurd` here instead, once it's merged /// ```rust,ignore (illustrative-and-has-placeholders) /// // this /// let x: u8 = panic!(); @@ -281,7 +282,6 @@ mod prim_bool {} /// // where absurd is a function with the following signature /// // (it's sound, because `!` always marks unreachable code): /// fn absurd(_: !) -> T { ... 
} -// FIXME: use `core::convert::absurd` here instead, once it's merged /// ``` /// /// This can lead to compilation errors if the type cannot be inferred: diff --git a/library/core/src/ptr/alignment.rs b/library/core/src/ptr/alignment.rs index 99bf323ec572c..bbbcf1eb89183 100644 --- a/library/core/src/ptr/alignment.rs +++ b/library/core/src/ptr/alignment.rs @@ -1,3 +1,5 @@ +#![allow(clippy::enum_clike_unportable_variant)] + use safety::{ensures, invariant, requires}; #[cfg(kani)] @@ -266,7 +268,7 @@ impl const Default for Alignment { #[cfg(target_pointer_width = "16")] #[derive(Copy, Clone, PartialEq, Eq)] -#[repr(u16)] +#[repr(usize)] #[cfg_attr(kani, derive(kani::Arbitrary))] enum AlignmentEnum { _Align1Shl0 = 1 << 0, @@ -289,7 +291,7 @@ enum AlignmentEnum { #[cfg(target_pointer_width = "32")] #[derive(Copy, Clone, PartialEq, Eq)] -#[repr(u32)] +#[repr(usize)] #[cfg_attr(kani, derive(kani::Arbitrary))] enum AlignmentEnum { _Align1Shl0 = 1 << 0, @@ -328,7 +330,7 @@ enum AlignmentEnum { #[cfg(target_pointer_width = "64")] #[derive(Copy, Clone, PartialEq, Eq)] -#[repr(u64)] +#[repr(usize)] #[cfg_attr(kani, derive(kani::Arbitrary))] enum AlignmentEnum { _Align1Shl0 = 1 << 0, diff --git a/library/core/src/result.rs b/library/core/src/result.rs index f65257ff59b92..474f86395ae0e 100644 --- a/library/core/src/result.rs +++ b/library/core/src/result.rs @@ -534,6 +534,7 @@ #![stable(feature = "rust1", since = "1.0.0")] use crate::iter::{self, FusedIterator, TrustedLen}; +use crate::marker::Destruct; use crate::ops::{self, ControlFlow, Deref, DerefMut}; use crate::{convert, fmt, hint}; @@ -606,7 +607,13 @@ impl Result { #[must_use] #[inline] #[stable(feature = "is_some_and", since = "1.70.0")] - pub fn is_ok_and(self, f: impl FnOnce(T) -> bool) -> bool { + #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")] + pub const fn is_ok_and(self, f: F) -> bool + where + F: ~const FnOnce(T) -> bool + ~const Destruct, + T: ~const Destruct, + E: ~const Destruct, + { match self { Err(_) => false, Ok(x) => f(x), @@ -655,7 +662,13 @@ impl Result { #[must_use] #[inline] #[stable(feature = "is_some_and", since = "1.70.0")] - pub fn is_err_and(self, f: impl FnOnce(E) -> bool) -> bool { + #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")] + pub const fn is_err_and(self, f: F) -> bool + where + F: ~const FnOnce(E) -> bool + ~const Destruct, + E: ~const Destruct, + T: ~const Destruct, + { match self { Ok(_) => false, Err(e) => f(e), @@ -682,8 +695,13 @@ impl Result { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")] #[rustc_diagnostic_item = "result_ok_method"] - pub fn ok(self) -> Option { + pub const fn ok(self) -> Option + where + T: ~const Destruct, + E: ~const Destruct, + { match self { Ok(x) => Some(x), Err(_) => None, @@ -706,7 +724,12 @@ impl Result { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn err(self) -> Option { + #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")] + pub const fn err(self) -> Option + where + T: ~const Destruct, + E: ~const Destruct, + { match self { Ok(_) => None, Err(x) => Some(x), @@ -796,7 +819,11 @@ impl Result { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn map U>(self, op: F) -> Result { + #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")] + pub const fn map(self, op: F) -> Result + where + F: ~const FnOnce(T) -> U + ~const Destruct, + 
{ match self { Ok(t) => Ok(op(t)), Err(e) => Err(e), @@ -823,8 +850,15 @@ impl Result { /// ``` #[inline] #[stable(feature = "result_map_or", since = "1.41.0")] + #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")] #[must_use = "if you don't need the returned value, use `if let` instead"] - pub fn map_or U>(self, default: U, f: F) -> U { + pub const fn map_or(self, default: U, f: F) -> U + where + F: ~const FnOnce(T) -> U + ~const Destruct, + T: ~const Destruct, + E: ~const Destruct, + U: ~const Destruct, + { match self { Ok(t) => f(t), Err(_) => default, @@ -851,7 +885,12 @@ impl Result { /// ``` #[inline] #[stable(feature = "result_map_or_else", since = "1.41.0")] - pub fn map_or_else U, F: FnOnce(T) -> U>(self, default: D, f: F) -> U { + #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")] + pub const fn map_or_else(self, default: D, f: F) -> U + where + D: ~const FnOnce(E) -> U + ~const Destruct, + F: ~const FnOnce(T) -> U + ~const Destruct, + { match self { Ok(t) => f(t), Err(e) => default(e), @@ -877,10 +916,13 @@ impl Result { /// [default value]: Default::default #[inline] #[unstable(feature = "result_option_map_or_default", issue = "138099")] - pub fn map_or_default(self, f: F) -> U + #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")] + pub const fn map_or_default(self, f: F) -> U where - U: Default, - F: FnOnce(T) -> U, + F: ~const FnOnce(T) -> U + ~const Destruct, + U: ~const Default, + T: ~const Destruct, + E: ~const Destruct, { match self { Ok(t) => f(t), @@ -908,7 +950,11 @@ impl Result { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn map_err F>(self, op: O) -> Result { + #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")] + pub const fn map_err(self, op: O) -> Result + where + O: ~const FnOnce(E) -> F + ~const Destruct, + { match self { Ok(t) => Ok(t), Err(e) => Err(op(e)), @@ -930,7 +976,11 @@ impl Result { /// ``` #[inline] #[stable(feature = "result_option_inspect", since = "1.76.0")] - pub fn inspect(self, f: F) -> Self { + #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")] + pub const fn inspect(self, f: F) -> Self + where + F: ~const FnOnce(&T) + ~const Destruct, + { if let Ok(ref t) = self { f(t); } @@ -954,7 +1004,11 @@ impl Result { /// ``` #[inline] #[stable(feature = "result_option_inspect", since = "1.76.0")] - pub fn inspect_err(self, f: F) -> Self { + #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")] + pub const fn inspect_err(self, f: F) -> Self + where + F: ~const FnOnce(&E) + ~const Destruct, + { if let Err(ref e) = self { f(e); } @@ -1033,7 +1087,8 @@ impl Result { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn iter(&self) -> Iter<'_, T> { + #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")] + pub const fn iter(&self) -> Iter<'_, T> { Iter { inner: self.as_ref().ok() } } @@ -1056,7 +1111,8 @@ impl Result { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn iter_mut(&mut self) -> IterMut<'_, T> { + #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")] + pub const fn iter_mut(&mut self) -> IterMut<'_, T> { IterMut { inner: self.as_mut().ok() } } @@ -1195,9 +1251,11 @@ impl Result { /// [`FromStr`]: crate::str::FromStr #[inline] #[stable(feature = "result_unwrap_or_default", since = "1.16.0")] - pub fn unwrap_or_default(self) -> T + #[rustc_const_unstable(feature = "const_result_trait_fn", 
issue = "144211")] + pub const fn unwrap_or_default(self) -> T where - T: Default, + T: ~const Default + ~const Destruct, + E: ~const Destruct, { match self { Ok(x) => x, @@ -1370,7 +1428,13 @@ impl Result { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn and(self, res: Result) -> Result { + #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")] + pub const fn and(self, res: Result) -> Result + where + T: ~const Destruct, + E: ~const Destruct, + U: ~const Destruct, + { match self { Ok(_) => res, Err(e) => Err(e), @@ -1409,8 +1473,12 @@ impl Result { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")] #[rustc_confusables("flat_map", "flatmap")] - pub fn and_then Result>(self, op: F) -> Result { + pub const fn and_then(self, op: F) -> Result + where + F: ~const FnOnce(T) -> Result + ~const Destruct, + { match self { Ok(t) => op(t), Err(e) => Err(e), @@ -1446,7 +1514,13 @@ impl Result { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn or(self, res: Result) -> Result { + #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")] + pub const fn or(self, res: Result) -> Result + where + T: ~const Destruct, + E: ~const Destruct, + F: ~const Destruct, + { match self { Ok(v) => Ok(v), Err(_) => res, @@ -1471,7 +1545,11 @@ impl Result { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn or_else Result>(self, op: O) -> Result { + #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")] + pub const fn or_else(self, op: O) -> Result + where + O: ~const FnOnce(E) -> Result + ~const Destruct, + { match self { Ok(t) => Ok(t), Err(e) => op(e), @@ -1498,7 +1576,12 @@ impl Result { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn unwrap_or(self, default: T) -> T { + #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")] + pub const fn unwrap_or(self, default: T) -> T + where + T: ~const Destruct, + E: ~const Destruct, + { match self { Ok(t) => t, Err(_) => default, @@ -1519,7 +1602,11 @@ impl Result { #[inline] #[track_caller] #[stable(feature = "rust1", since = "1.0.0")] - pub fn unwrap_or_else T>(self, op: F) -> T { + #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")] + pub const fn unwrap_or_else(self, op: F) -> T + where + F: ~const FnOnce(E) -> T + ~const Destruct, + { match self { Ok(t) => t, Err(e) => op(e), @@ -1544,7 +1631,7 @@ impl Result { /// /// ```no_run /// let x: Result = Err("emergency failure"); - /// unsafe { x.unwrap_unchecked(); } // Undefined behavior! + /// unsafe { x.unwrap_unchecked() }; // Undefined behavior! /// ``` #[inline] #[track_caller] @@ -1762,7 +1849,7 @@ impl Result, E> { #[cold] #[track_caller] fn unwrap_failed(msg: &str, error: &dyn fmt::Debug) -> ! { - panic!("{msg}: {error:?}") + panic!("{msg}: {error:?}"); } // This is a separate function to avoid constructing a `dyn Debug` @@ -1773,7 +1860,7 @@ fn unwrap_failed(msg: &str, error: &dyn fmt::Debug) -> ! { #[inline] #[cold] #[track_caller] -fn unwrap_failed(_msg: &str, _error: &T) -> ! { +const fn unwrap_failed(_msg: &str, _error: &T) -> ! 
{ panic!() } diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs index d3e78f011daa7..f17a63db1e44a 100644 --- a/library/core/src/slice/mod.rs +++ b/library/core/src/slice/mod.rs @@ -3978,8 +3978,9 @@ impl [T] { /// /// [`split_at_mut`]: slice::split_at_mut #[stable(feature = "swap_with_slice", since = "1.27.0")] + #[rustc_const_unstable(feature = "const_swap_with_slice", issue = "142204")] #[track_caller] - pub fn swap_with_slice(&mut self, other: &mut [T]) { + pub const fn swap_with_slice(&mut self, other: &mut [T]) { assert!(self.len() == other.len(), "destination and source slices have different lengths"); // SAFETY: `self` is valid for `self.len()` elements by definition, and `src` was // checked to have the same length. The slices cannot overlap because diff --git a/library/coretests/tests/lib.rs b/library/coretests/tests/lib.rs index 029a7b00ad36e..0a9c0c61c9584 100644 --- a/library/coretests/tests/lib.rs +++ b/library/coretests/tests/lib.rs @@ -20,6 +20,7 @@ #![feature(const_eval_select)] #![feature(const_ops)] #![feature(const_ref_cell)] +#![feature(const_result_trait_fn)] #![feature(const_trait_impl)] #![feature(core_float_math)] #![feature(core_intrinsics)] @@ -82,6 +83,7 @@ #![feature(pointer_is_aligned_to)] #![feature(portable_simd)] #![feature(ptr_metadata)] +#![feature(result_option_map_or_default)] #![feature(slice_from_ptr_range)] #![feature(slice_internals)] #![feature(slice_partition_dedup)] diff --git a/library/coretests/tests/result.rs b/library/coretests/tests/result.rs index 90ec844bc5771..39898d5dbb76e 100644 --- a/library/coretests/tests/result.rs +++ b/library/coretests/tests/result.rs @@ -421,3 +421,86 @@ fn result_try_trait_v2_branch() { assert_eq!(Ok::, ()>(one).branch(), Continue(one)); assert_eq!(Err::, ()>(()).branch(), Break(Err(()))); } + +// helper functions for const contexts +const fn eq10(x: u8) -> bool { + x == 10 +} +const fn eq20(e: u8) -> bool { + e == 20 +} +const fn double_u16(x: u8) -> u16 { + x as u16 * 2 +} +const fn to_u16(x: u8) -> u16 { + x as u16 +} +const fn err_to_u16_plus1(e: u8) -> u16 { + e as u16 + 1 +} +const fn inc_u8(x: u8) -> u8 { + x + 1 +} +const fn noop_u8_ref(_x: &u8) {} +const fn add1_result(x: u8) -> Result { + Ok(x + 1) +} +const fn add5_result(e: u8) -> Result { + Ok(e + 5) +} +const fn plus7_u8(e: u8) -> u8 { + e + 7 +} + +#[test] +fn test_const_result() { + const { + let r_ok: Result = Ok(10); + let r_err: Result = Err(20); + assert!(r_ok.is_ok()); + assert!(r_err.is_err()); + + let ok_and = r_ok.is_ok_and(eq10); + let err_and = r_err.is_err_and(eq20); + assert!(ok_and); + assert!(err_and); + + let opt_ok: Option = r_ok.ok(); + let opt_err: Option = r_err.err(); + assert!(opt_ok.unwrap() == 10); + assert!(opt_err.unwrap() == 20); + + let mapped: Result = r_ok.map(double_u16); + let map_or: u16 = r_ok.map_or(0, to_u16); + let map_or_else: u16 = r_err.map_or_else(err_to_u16_plus1, to_u16); + let map_or_default: u8 = r_err.map_or_default(inc_u8); + assert!(mapped.unwrap_or_default() == 20); + assert!(map_or == 10); + assert!(map_or_else == 21); + assert!(map_or_default == 0); + + let _map_err: Result = r_err.map_err(to_u16); + //FIXME: currently can't unwrap const error + // assert!(map_err.unwrap_err() == 20); + + let inspected_ok: Result = r_ok.inspect(noop_u8_ref); + let inspected_err: Result = r_err.inspect_err(noop_u8_ref); + assert!(inspected_ok.is_ok()); + assert!(inspected_err.is_err()); + + let unwrapped_default: u8 = r_err.unwrap_or_default(); + assert!(unwrapped_default == 0); + + let 
and_then: Result = r_ok.and_then(add1_result); + let or: Result = r_err.or(Ok(5)); + let or_else: Result = r_err.or_else(add5_result); + assert!(and_then.unwrap_or_default() == 11); + assert!(or.unwrap_or_default() == 5); + assert!(or_else.unwrap_or_default() == 25); + + let u_or: u8 = r_err.unwrap_or(7); + let u_or_else: u8 = r_err.unwrap_or_else(plus7_u8); + assert!(u_or == 7); + assert!(u_or_else == 27); + }; +} diff --git a/library/panic_abort/Cargo.toml b/library/panic_abort/Cargo.toml index e8c66f1a4dd34..ecf043ac7071c 100644 --- a/library/panic_abort/Cargo.toml +++ b/library/panic_abort/Cargo.toml @@ -12,8 +12,7 @@ bench = false doc = false [dependencies] -core = { path = "../core" } -compiler_builtins = { path = "../compiler-builtins/compiler-builtins" } +core = { path = "../rustc-std-workspace-core", package = "rustc-std-workspace-core" } [target.'cfg(target_os = "android")'.dependencies] libc = { version = "0.2", default-features = false } diff --git a/library/panic_unwind/Cargo.toml b/library/panic_unwind/Cargo.toml index 47914b9cd3c6e..13d1a7160da8d 100644 --- a/library/panic_unwind/Cargo.toml +++ b/library/panic_unwind/Cargo.toml @@ -13,10 +13,9 @@ doc = false [dependencies] alloc = { path = "../alloc" } -core = { path = "../core" } -unwind = { path = "../unwind" } -compiler_builtins = { path = "../compiler-builtins/compiler-builtins" } cfg-if = { version = "1.0", features = ['rustc-dep-of-std'] } +core = { path = "../rustc-std-workspace-core", package = "rustc-std-workspace-core" } +unwind = { path = "../unwind" } [target.'cfg(not(all(windows, target_env = "msvc")))'.dependencies] libc = { version = "0.2", default-features = false } diff --git a/library/rustc-std-workspace-core/README.md b/library/rustc-std-workspace-core/README.md index 55a36e741065f..15bc93f7948a3 100644 --- a/library/rustc-std-workspace-core/README.md +++ b/library/rustc-std-workspace-core/README.md @@ -11,6 +11,12 @@ on crates.io will draw a dependency edge to `libcore`, the version defined in this repository. That should draw all the dependency edges to ensure Cargo builds crates successfully! +`rustc-std-workspace-core` also ensures `compiler-builtins` is in the crate +graph. This crate is used by other crates in `library/`, other than `std` and +`alloc`, so the `compiler-builtins` setup only needs to be configured in a +single place. (Otherwise these crates would just need to depend on `core` and +`compiler-builtins` separately.) + Note that crates on crates.io need to depend on this crate with the name `core` for everything to work correctly. 
To do that they can use: diff --git a/library/std/Cargo.toml b/library/std/Cargo.toml index 6b539279ea2d7..6fdeb97f5874e 100644 --- a/library/std/Cargo.toml +++ b/library/std/Cargo.toml @@ -18,7 +18,6 @@ cfg-if = { version = "1.0", features = ['rustc-dep-of-std'] } panic_unwind = { path = "../panic_unwind", optional = true } panic_abort = { path = "../panic_abort" } core = { path = "../core", public = true } -compiler_builtins = { path = "../compiler-builtins/compiler-builtins" } unwind = { path = "../unwind" } hashbrown = { version = "0.15", default-features = false, features = [ 'rustc-dep-of-std', diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs index 38eec467e3ea1..9896d07fe7b82 100644 --- a/library/std/src/lib.rs +++ b/library/std/src/lib.rs @@ -295,6 +295,8 @@ #![feature(f128)] #![feature(ffi_const)] #![feature(formatting_options)] +#![feature(hash_map_internals)] +#![feature(hash_map_macro)] #![feature(if_let_guard)] #![feature(intra_doc_pointers)] #![feature(iter_advance_by)] diff --git a/library/std/src/macros.rs b/library/std/src/macros.rs index 25e2b7ea13703..254570ae9c836 100644 --- a/library/std/src/macros.rs +++ b/library/std/src/macros.rs @@ -379,3 +379,77 @@ macro_rules! dbg { ($($crate::dbg!($val)),+,) }; } + +#[doc(hidden)] +#[macro_export] +#[allow_internal_unstable(hash_map_internals)] +#[unstable(feature = "hash_map_internals", issue = "none")] +macro_rules! repetition_utils { + (@count $($tokens:tt),*) => {{ + [$($crate::repetition_utils!(@replace $tokens => ())),*].len() + }}; + + (@replace $x:tt => $y:tt) => { $y } +} + +/// Creates a [`HashMap`] containing the arguments. +/// +/// `hash_map!` allows specifying the entries that make +/// up the [`HashMap`] where the key and value are separated by a `=>`. +/// +/// The entries are separated by commas with a trailing comma being allowed. +/// +/// It is semantically equivalent to using repeated [`HashMap::insert`] +/// on a newly created hashmap. +/// +/// `hash_map!` will attempt to avoid repeated reallocations by +/// using [`HashMap::with_capacity`]. +/// +/// # Examples +/// +/// ```rust +/// #![feature(hash_map_macro)] +/// +/// let map = hash_map! { +/// "key" => "value", +/// "key1" => "value1" +/// }; +/// +/// assert_eq!(map.get("key"), Some(&"value")); +/// assert_eq!(map.get("key1"), Some(&"value1")); +/// assert!(map.get("brrrrrrooooommm").is_none()); +/// ``` +/// +/// And with a trailing comma +/// +///```rust +/// #![feature(hash_map_macro)] +/// +/// let map = hash_map! { +/// "key" => "value", // notice the , +/// }; +/// +/// assert_eq!(map.get("key"), Some(&"value")); +/// ``` +/// +/// The key and value are moved into the HashMap. +/// +/// [`HashMap`]: crate::collections::HashMap +/// [`HashMap::insert`]: crate::collections::HashMap::insert +/// [`HashMap::with_capacity`]: crate::collections::HashMap::with_capacity +#[macro_export] +#[allow_internal_unstable(hash_map_internals)] +#[unstable(feature = "hash_map_macro", issue = "144032")] +macro_rules! hash_map { + () => {{ + $crate::collections::HashMap::new() + }}; + + ( $( $key:expr => $value:expr ),* $(,)? 
) => {{ + let mut map = $crate::collections::HashMap::with_capacity( + const { $crate::repetition_utils!(@count $($key),*) } + ); + $( map.insert($key, $value); )* + map + }} +} diff --git a/library/std/src/panic.rs b/library/std/src/panic.rs index 234fb284a5904..913ef72f67464 100644 --- a/library/std/src/panic.rs +++ b/library/std/src/panic.rs @@ -388,7 +388,7 @@ pub fn catch_unwind R + UnwindSafe, R>(f: F) -> Result { /// ``` #[stable(feature = "resume_unwind", since = "1.9.0")] pub fn resume_unwind(payload: Box) -> ! { - panicking::rust_panic_without_hook(payload) + panicking::resume_unwind(payload) } /// Makes all future panics abort directly without running the panic hook or unwinding. diff --git a/library/std/src/panicking.rs b/library/std/src/panicking.rs index 7873049d20bfd..224cd39855a45 100644 --- a/library/std/src/panicking.rs +++ b/library/std/src/panicking.rs @@ -696,14 +696,14 @@ pub fn begin_panic_handler(info: &core::panic::PanicInfo<'_>) -> ! { let msg = info.message(); crate::sys::backtrace::__rust_end_short_backtrace(move || { if let Some(s) = msg.as_str() { - rust_panic_with_hook( + panic_with_hook( &mut StaticStrPayload(s), loc, info.can_unwind(), info.force_no_backtrace(), ); } else { - rust_panic_with_hook( + panic_with_hook( &mut FormatStringPayload { inner: &msg, string: None }, loc, info.can_unwind(), @@ -767,7 +767,7 @@ pub const fn begin_panic(msg: M) -> ! { let loc = Location::caller(); crate::sys::backtrace::__rust_end_short_backtrace(move || { - rust_panic_with_hook( + panic_with_hook( &mut Payload { inner: Some(msg) }, loc, /* can_unwind */ true, @@ -792,7 +792,7 @@ fn payload_as_str(payload: &dyn Any) -> &str { /// panics, panic hooks, and finally dispatching to the panic runtime to either /// abort or unwind. #[optimize(size)] -fn rust_panic_with_hook( +fn panic_with_hook( payload: &mut dyn PanicPayload, location: &Location<'_>, can_unwind: bool, @@ -861,7 +861,7 @@ fn rust_panic_with_hook( /// This is the entry point for `resume_unwind`. /// It just forwards the payload to the panic runtime. #[cfg_attr(feature = "panic_immediate_abort", inline)] -pub fn rust_panic_without_hook(payload: Box) -> ! { +pub fn resume_unwind(payload: Box) -> ! { panic_count::increase(false); struct RewrapBox(Box); @@ -885,8 +885,8 @@ pub fn rust_panic_without_hook(payload: Box) -> ! { rust_panic(&mut RewrapBox(payload)) } -/// An unmangled function (through `rustc_std_internal_symbol`) on which to slap -/// yer breakpoints. +/// A function with a fixed suffix (through `rustc_std_internal_symbol`) +/// on which to slap yer breakpoints. 
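// A minimal sketch, not part of the diff above: roughly what the `hash_map!`
// macro introduced in std::macros expands to for a two-entry map. The entry
// count is computed at compile time by `repetition_utils!(@count ...)`, which
// replaces every key token with `()` and takes the length of the resulting
// array, so `with_capacity` receives a constant and the map does not
// reallocate while the literal entries are inserted.
use std::collections::HashMap;

fn main() {
    // `hash_map! { "a" => 1, "b" => 2 }` expands to approximately:
    let map = {
        let mut map = HashMap::with_capacity(
            // const { repetition_utils!(@count "a", "b") } == [(), ()].len() == 2
            const { [(), ()].len() },
        );
        map.insert("a", 1);
        map.insert("b", 2);
        map
    };
    assert_eq!(map.get("a"), Some(&1));
    assert_eq!(map.len(), 2);
}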
#[inline(never)] #[cfg_attr(not(test), rustc_std_internal_symbol)] #[cfg(not(feature = "panic_immediate_abort"))] diff --git a/library/std/src/path.rs b/library/std/src/path.rs index d9c34d4fa0451..055e7f814800d 100644 --- a/library/std/src/path.rs +++ b/library/std/src/path.rs @@ -3259,8 +3259,8 @@ impl Path { /// /// # Examples /// - #[cfg_attr(unix, doc = "```no_run")] - #[cfg_attr(not(unix), doc = "```ignore")] + /// ```rust,no_run + /// # #[cfg(unix)] { /// use std::path::Path; /// use std::os::unix::fs::symlink; /// @@ -3268,6 +3268,7 @@ impl Path { /// symlink("/origin_does_not_exist/", link_path).unwrap(); /// assert_eq!(link_path.is_symlink(), true); /// assert_eq!(link_path.exists(), false); + /// # } /// ``` /// /// # See Also diff --git a/library/std/src/sync/lazy_lock.rs b/library/std/src/sync/lazy_lock.rs index eba849d16dacd..a40e29a772a9c 100644 --- a/library/std/src/sync/lazy_lock.rs +++ b/library/std/src/sync/lazy_lock.rs @@ -25,6 +25,22 @@ union Data { /// /// [`LazyCell`]: crate::cell::LazyCell /// +/// # Poisoning +/// +/// If the initialization closure passed to [`LazyLock::new`] panics, the lock will be poisoned. +/// Once the lock is poisoned, any threads that attempt to access this lock (via a dereference +/// or via an explicit call to [`force()`]) will panic. +/// +/// This concept is similar to that of poisoning in the [`std::sync::poison`] module. A key +/// difference, however, is that poisoning in `LazyLock` is _unrecoverable_. All future accesses of +/// the lock from other threads will panic, whereas a type in [`std::sync::poison`] like +/// [`std::sync::poison::Mutex`] allows recovery via [`PoisonError::into_inner()`]. +/// +/// [`force()`]: LazyLock::force +/// [`std::sync::poison`]: crate::sync::poison +/// [`std::sync::poison::Mutex`]: crate::sync::poison::Mutex +/// [`PoisonError::into_inner()`]: crate::sync::poison::PoisonError::into_inner +/// /// # Examples /// /// Initialize static variables with `LazyLock`. @@ -102,6 +118,10 @@ impl T> LazyLock { /// /// Returns `Ok(value)` if `Lazy` is initialized and `Err(f)` otherwise. /// + /// # Panics + /// + /// Panics if the lock is poisoned. + /// /// # Examples /// /// ``` @@ -136,6 +156,15 @@ impl T> LazyLock { /// Forces the evaluation of this lazy value and returns a mutable reference to /// the result. /// + /// # Panics + /// + /// If the initialization closure panics (the one that is passed to the [`new()`] method), the + /// panic is propagated to the caller, and the lock becomes poisoned. This will cause all future + /// accesses of the lock (via [`force()`] or a dereference) to panic. + /// + /// [`new()`]: LazyLock::new + /// [`force()`]: LazyLock::force + /// /// # Examples /// /// ``` @@ -193,6 +222,15 @@ impl T> LazyLock { /// This method will block the calling thread if another initialization /// routine is currently running. /// + /// # Panics + /// + /// If the initialization closure panics (the one that is passed to the [`new()`] method), the + /// panic is propagated to the caller, and the lock becomes poisoned. This will cause all future + /// accesses of the lock (via [`force()`] or a dereference) to panic. + /// + /// [`new()`]: LazyLock::new + /// [`force()`]: LazyLock::force + /// /// # Examples /// /// ``` @@ -227,7 +265,8 @@ impl T> LazyLock { } impl LazyLock { - /// Returns a mutable reference to the value if initialized, or `None` if not. + /// Returns a mutable reference to the value if initialized. Otherwise (if uninitialized or + /// poisoned), returns `None`. 
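// A minimal sketch, not part of the diff above: the unrecoverable poisoning
// behavior documented for `LazyLock` in this hunk. The first forced access
// panics inside the initialization closure and poisons the lock; every later
// access panics as well, and `LazyLock::get` reports the value as absent.
// `catch_unwind` is only used here to observe both panics in one process.
use std::panic::{catch_unwind, AssertUnwindSafe};
use std::sync::LazyLock;

static POISONED: LazyLock<u32> = LazyLock::new(|| panic!("init failed"));

fn main() {
    // First access runs the closure, which panics and poisons the lock.
    let first = catch_unwind(AssertUnwindSafe(|| *POISONED));
    assert!(first.is_err());

    // Second access does not rerun the closure; it panics because the lock
    // is already poisoned, and there is no way to recover it.
    let second = catch_unwind(AssertUnwindSafe(|| *POISONED));
    assert!(second.is_err());

    // A poisoned `LazyLock` looks uninitialized to `get`.
    assert!(LazyLock::get(&POISONED).is_none());
}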
/// /// # Examples /// @@ -256,7 +295,8 @@ impl LazyLock { } } - /// Returns a reference to the value if initialized, or `None` if not. + /// Returns a reference to the value if initialized. Otherwise (if uninitialized or poisoned), + /// returns `None`. /// /// # Examples /// @@ -307,6 +347,14 @@ impl T> Deref for LazyLock { /// This method will block the calling thread if another initialization /// routine is currently running. /// + /// # Panics + /// + /// If the initialization closure panics (the one that is passed to the [`new()`] method), the + /// panic is propagated to the caller, and the lock becomes poisoned. This will cause all future + /// accesses of the lock (via [`force()`] or a dereference) to panic. + /// + /// [`new()`]: LazyLock::new + /// [`force()`]: LazyLock::force #[inline] fn deref(&self) -> &T { LazyLock::force(self) @@ -315,6 +363,14 @@ impl T> Deref for LazyLock { #[stable(feature = "lazy_deref_mut", since = "1.89.0")] impl T> DerefMut for LazyLock { + /// # Panics + /// + /// If the initialization closure panics (the one that is passed to the [`new()`] method), the + /// panic is propagated to the caller, and the lock becomes poisoned. This will cause all future + /// accesses of the lock (via [`force()`] or a dereference) to panic. + /// + /// [`new()`]: LazyLock::new + /// [`force()`]: LazyLock::force #[inline] fn deref_mut(&mut self) -> &mut T { LazyLock::force_mut(self) diff --git a/library/std/src/sync/once_lock.rs b/library/std/src/sync/once_lock.rs index a5c3a6c46a433..b224044cbe09c 100644 --- a/library/std/src/sync/once_lock.rs +++ b/library/std/src/sync/once_lock.rs @@ -16,6 +16,8 @@ use crate::sync::Once; /// A `OnceLock` can be thought of as a safe abstraction over uninitialized data that becomes /// initialized once written. /// +/// Unlike [`Mutex`](crate::sync::Mutex), `OnceLock` is never poisoned on panic. +/// /// [`OnceCell`]: crate::cell::OnceCell /// [`LazyLock`]: crate::sync::LazyLock /// [`LazyLock::new(|| ...)`]: crate::sync::LazyLock::new diff --git a/library/std/src/sync/poison.rs b/library/std/src/sync/poison.rs index b901a5701a488..31889dcc10fad 100644 --- a/library/std/src/sync/poison.rs +++ b/library/std/src/sync/poison.rs @@ -2,15 +2,16 @@ //! //! # Poisoning //! -//! All synchronization objects in this module implement a strategy called "poisoning" -//! where if a thread panics while holding the exclusive access granted by the primitive, -//! the state of the primitive is set to "poisoned". -//! This information is then propagated to all other threads +//! All synchronization objects in this module implement a strategy called +//! "poisoning" where a primitive becomes poisoned if it recognizes that some +//! thread has panicked while holding the exclusive access granted by the +//! primitive. This information is then propagated to all other threads //! to signify that the data protected by this primitive is likely tainted //! (some invariant is not being upheld). //! -//! The specifics of how this "poisoned" state affects other threads -//! depend on the primitive. See [#Overview] below. +//! The specifics of how this "poisoned" state affects other threads and whether +//! the panics are recognized reliably or on a best-effort basis depend on the +//! primitive. See [Overview](#overview) below. //! //! For the alternative implementations that do not employ poisoning, //! see [`std::sync::nonpoison`]. @@ -36,14 +37,15 @@ //! - [`Mutex`]: Mutual Exclusion mechanism, which ensures that at //! 
most one thread at a time is able to access some data. //! -//! [`Mutex::lock()`] returns a [`LockResult`], -//! providing a way to deal with the poisoned state. -//! See [`Mutex`'s documentation](Mutex#poisoning) for more. +//! Panicking while holding the lock typically poisons the mutex, but it is +//! not guaranteed to detect this condition in all circumstances. +//! [`Mutex::lock()`] returns a [`LockResult`], providing a way to deal with +//! the poisoned state. See [`Mutex`'s documentation](Mutex#poisoning) for more. //! //! - [`Once`]: A thread-safe way to run a piece of code only once. //! Mostly useful for implementing one-time global initialization. //! -//! [`Once`] is poisoned if the piece of code passed to +//! [`Once`] is reliably poisoned if the piece of code passed to //! [`Once::call_once()`] or [`Once::call_once_force()`] panics. //! When in poisoned state, subsequent calls to [`Once::call_once()`] will panic too. //! [`Once::call_once_force()`] can be used to clear the poisoned state. @@ -53,7 +55,7 @@ //! writer at a time. In some cases, this can be more efficient than //! a mutex. //! -//! This implementation, like [`Mutex`], will become poisoned on a panic. +//! This implementation, like [`Mutex`], usually becomes poisoned on a panic. //! Note, however, that an `RwLock` may only be poisoned if a panic occurs //! while it is locked exclusively (write mode). If a panic occurs in any reader, //! then the lock will not be poisoned. diff --git a/library/std/src/sync/poison/mutex.rs b/library/std/src/sync/poison/mutex.rs index 64744f18c746e..6205c4fa4ca4b 100644 --- a/library/std/src/sync/poison/mutex.rs +++ b/library/std/src/sync/poison/mutex.rs @@ -18,20 +18,69 @@ use crate::sys::sync as sys; /// # Poisoning /// /// The mutexes in this module implement a strategy called "poisoning" where a -/// mutex is considered poisoned whenever a thread panics while holding the -/// mutex. Once a mutex is poisoned, all other threads are unable to access the -/// data by default as it is likely tainted (some invariant is not being -/// upheld). +/// mutex becomes poisoned if it recognizes that the thread holding it has +/// panicked. /// -/// For a mutex, this means that the [`lock`] and [`try_lock`] methods return a +/// Once a mutex is poisoned, all other threads are unable to access the data by +/// default as it is likely tainted (some invariant is not being upheld). For a +/// mutex, this means that the [`lock`] and [`try_lock`] methods return a /// [`Result`] which indicates whether a mutex has been poisoned or not. Most /// usage of a mutex will simply [`unwrap()`] these results, propagating panics /// among threads to ensure that a possibly invalid invariant is not witnessed. /// -/// A poisoned mutex, however, does not prevent all access to the underlying -/// data. The [`PoisonError`] type has an [`into_inner`] method which will return -/// the guard that would have otherwise been returned on a successful lock. This -/// allows access to the data, despite the lock being poisoned. +/// Poisoning is only advisory: the [`PoisonError`] type has an [`into_inner`] +/// method which will return the guard that would have otherwise been returned +/// on a successful lock. This allows access to the data, despite the lock being +/// poisoned. +/// +/// In addition, the panic detection is not ideal, so even unpoisoned mutexes +/// need to be handled with care, since certain panics may have been skipped. 
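// A minimal sketch, not part of the diff above: the poisoning rules restated
// in the `poison` module and `Mutex` documentation changes. A panic while a
// `Mutex` guard is held poisons the mutex, but the poison is advisory and can
// be bypassed with `PoisonError::into_inner`; an `RwLock` is only poisoned by
// a panicking writer, never by a panicking reader.
use std::panic::{catch_unwind, AssertUnwindSafe};
use std::sync::{Arc, Mutex, RwLock};
use std::thread;

fn main() {
    let data = Arc::new(Mutex::new(vec![1, 2, 3]));

    let holder = Arc::clone(&data);
    let _ = thread::spawn(move || {
        let _guard = holder.lock().unwrap();
        // Panicking while the guard is alive marks the mutex as poisoned.
        panic!("invariant violated while holding the lock");
    })
    .join();

    assert!(data.is_poisoned());
    // Poisoning is advisory: `into_inner` still hands out the guard.
    let recovered = match data.lock() {
        Ok(guard) => guard,
        Err(poisoned) => poisoned.into_inner(),
    };
    assert_eq!(*recovered, vec![1, 2, 3]);

    let rw = RwLock::new(0u32);
    // A panic while only a read guard is held does not poison the lock...
    let _ = catch_unwind(AssertUnwindSafe(|| {
        let _read = rw.read().unwrap();
        panic!("panic in a reader");
    }));
    assert!(!rw.is_poisoned());
    // ...but a panic while the write guard is held does.
    let _ = catch_unwind(AssertUnwindSafe(|| {
        let _write = rw.write().unwrap();
        panic!("panic in a writer");
    }));
    assert!(rw.is_poisoned());
}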
+/// Here is a non-exhaustive list of situations where this might occur: +/// +/// - If a mutex is locked while a panic is underway, e.g. within a [`Drop`] +/// implementation or a [panic hook], panicking for the second time while the +/// lock is held will leave the mutex unpoisoned. Note that while double panic +/// usually aborts the program, [`catch_unwind`] can prevent this. +/// +/// - Locking and unlocking the mutex across different panic contexts, e.g. by +/// storing the guard to a [`Cell`] within [`Drop::drop`] and accessing it +/// outside, or vice versa, can affect poisoning status in an unexpected way. +/// +/// - Foreign exceptions do not currently trigger poisoning even in absence of +/// other panics. +/// +/// While this rarely happens in realistic code, `unsafe` code cannot rely on +/// poisoning for soundness, since the behavior of poisoning can depend on +/// outside context. Here's an example of **incorrect** use of poisoning: +/// +/// ```rust +/// use std::sync::Mutex; +/// +/// struct MutexBox { +/// data: Mutex<*mut T>, +/// } +/// +/// impl MutexBox { +/// pub fn new(value: T) -> Self { +/// Self { +/// data: Mutex::new(Box::into_raw(Box::new(value))), +/// } +/// } +/// +/// pub fn replace_with(&self, f: impl FnOnce(T) -> T) { +/// let ptr = self.data.lock().expect("poisoned"); +/// // While `f` is running, the data is moved out of `*ptr`. If `f` +/// // panics, `*ptr` keeps pointing at a dropped value. The intention +/// // is that this will poison the mutex, so the following calls to +/// // `replace_with` will panic without reading `*ptr`. But since +/// // poisoning is not guaranteed to occur if this is run from a panic +/// // hook, this can lead to use-after-free. +/// unsafe { +/// (*ptr).write(f((*ptr).read())); +/// } +/// } +/// } +/// ``` /// /// [`new`]: Self::new /// [`lock`]: Self::lock @@ -39,6 +88,9 @@ use crate::sys::sync as sys; /// [`unwrap()`]: Result::unwrap /// [`PoisonError`]: super::PoisonError /// [`into_inner`]: super::PoisonError::into_inner +/// [panic hook]: crate::panic::set_hook +/// [`catch_unwind`]: crate::panic::catch_unwind +/// [`Cell`]: crate::cell::Cell /// /// # Examples /// diff --git a/library/std/src/sync/poison/once.rs b/library/std/src/sync/poison/once.rs index 103e519540795..faf2913c54730 100644 --- a/library/std/src/sync/poison/once.rs +++ b/library/std/src/sync/poison/once.rs @@ -136,7 +136,8 @@ impl Once { /// it will *poison* this [`Once`] instance, causing all future invocations of /// `call_once` to also panic. /// - /// This is similar to [poisoning with mutexes][poison]. + /// This is similar to [poisoning with mutexes][poison], but this mechanism + /// is guaranteed to never skip panics within `f`. /// /// [poison]: struct.Mutex.html#poisoning #[inline] @@ -293,6 +294,9 @@ impl Once { /// Blocks the current thread until initialization has completed, ignoring /// poisoning. + /// + /// If this [`Once`] has been poisoned, this function blocks until it + /// becomes completed, unlike [`Once::wait()`], which panics in this case. #[stable(feature = "once_wait", since = "1.86.0")] pub fn wait_force(&self) { if !self.inner.is_completed() { diff --git a/library/std/src/sync/poison/rwlock.rs b/library/std/src/sync/poison/rwlock.rs index 934a173425a81..2c92602bc878f 100644 --- a/library/std/src/sync/poison/rwlock.rs +++ b/library/std/src/sync/poison/rwlock.rs @@ -46,10 +46,12 @@ use crate::sys::sync as sys; /// /// # Poisoning /// -/// An `RwLock`, like [`Mutex`], will become poisoned on a panic. 
Note, however, -/// that an `RwLock` may only be poisoned if a panic occurs while it is locked -/// exclusively (write mode). If a panic occurs in any reader, then the lock -/// will not be poisoned. +/// An `RwLock`, like [`Mutex`], will [usually] become poisoned on a panic. Note, +/// however, that an `RwLock` may only be poisoned if a panic occurs while it is +/// locked exclusively (write mode). If a panic occurs in any reader, then the +/// lock will not be poisoned. +/// +/// [usually]: super::Mutex#poisoning /// /// # Examples /// diff --git a/library/std/src/sys/pal/uefi/helpers.rs b/library/std/src/sys/pal/uefi/helpers.rs index 271dc4d11de75..b50574de937aa 100644 --- a/library/std/src/sys/pal/uefi/helpers.rs +++ b/library/std/src/sys/pal/uefi/helpers.rs @@ -444,17 +444,17 @@ impl<'a> DevicePathNode<'a> { impl<'a> PartialEq for DevicePathNode<'a> { fn eq(&self, other: &Self) -> bool { - let self_len = self.length(); - let other_len = other.length(); - - self_len == other_len - && unsafe { - compiler_builtins::mem::memcmp( - self.protocol.as_ptr().cast(), - other.protocol.as_ptr().cast(), - usize::from(self_len), - ) == 0 - } + // Compare as a single buffer rather than by field since it optimizes better. + // + // SAFETY: `Protocol` is followed by a buffer of `length - sizeof::()`. `Protocol` + // has no padding so it is sound to interpret as a slice. + unsafe { + let s1 = + slice::from_raw_parts(self.protocol.as_ptr().cast::(), self.length().into()); + let s2 = + slice::from_raw_parts(other.protocol.as_ptr().cast::(), other.length().into()); + s1 == s2 + } } } diff --git a/library/std/src/thread/mod.rs b/library/std/src/thread/mod.rs index dff981c900c08..8d3ab82ace120 100644 --- a/library/std/src/thread/mod.rs +++ b/library/std/src/thread/mod.rs @@ -2018,6 +2018,9 @@ fn _assert_sync_and_send() { /// which may take time on systems with large numbers of mountpoints. /// (This does not apply to cgroup v2, or to processes not in a /// cgroup.) +/// - It does not attempt to take `ulimit` into account. If there is a limit set on the number of +/// threads, `available_parallelism` cannot know how much of that limit a Rust program should +/// take, or know in a reliable and race-free way how much of that limit is already taken. /// /// On all targets: /// - It may overcount the amount of parallelism available when running in a VM diff --git a/library/unwind/Cargo.toml b/library/unwind/Cargo.toml index f8da09f71931a..b9ce676eb3ffe 100644 --- a/library/unwind/Cargo.toml +++ b/library/unwind/Cargo.toml @@ -14,9 +14,8 @@ bench = false doc = false [dependencies] -core = { path = "../core" } -compiler_builtins = { path = "../compiler-builtins/compiler-builtins" } cfg-if = "1.0" +core = { path = "../rustc-std-workspace-core", package = "rustc-std-workspace-core" } [target.'cfg(not(all(windows, target_env = "msvc")))'.dependencies] libc = { version = "0.2.140", features = ['rustc-dep-of-std'], default-features = false } diff --git a/rust-toolchain.toml b/rust-toolchain.toml index b3b889ccf7063..4b67ad42eb300 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -2,5 +2,5 @@ # standard library we currently track. 
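// A minimal sketch, not part of the diff above: the pattern used in the UEFI
// `DevicePathNode` change earlier in this patch -- comparing two
// variable-length, padding-free records as raw byte slices instead of a
// field-by-field `memcmp`. The `Node` layout here is hypothetical; the real
// code reads the length out of the device-path protocol header.
use std::slice;

#[repr(C)]
struct Node {
    kind: u8,
    sub_kind: u8,
    length: u16, // total size in bytes, including this header
}

// Safety requirement mirrored from the patch: `node` must point to a valid
// record followed by `length - size_of::<Node>()` initialized bytes, and the
// layout must contain no padding.
unsafe fn node_bytes<'a>(node: *const Node) -> &'a [u8] {
    unsafe { slice::from_raw_parts(node.cast::<u8>(), (*node).length as usize) }
}

unsafe fn nodes_equal(a: *const Node, b: *const Node) -> bool {
    // Slice `==` compares lengths first and then contents, which the
    // optimizer lowers to a single memcmp-style comparison.
    unsafe { node_bytes(a) == node_bytes(b) }
}

fn main() {
    let a = Node { kind: 1, sub_kind: 2, length: 4 };
    let b = Node { kind: 1, sub_kind: 2, length: 4 };
    assert!(unsafe { nodes_equal(&a, &b) });
}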
[toolchain] -channel = "nightly-2025-08-01" +channel = "nightly-2025-08-06" components = ["llvm-tools-preview", "rustc-dev", "rust-src", "rustfmt"] diff --git a/tool_config/kani-version.toml b/tool_config/kani-version.toml index 9b03a8a4dacf7..ffb2ab558c7a1 100644 --- a/tool_config/kani-version.toml +++ b/tool_config/kani-version.toml @@ -2,4 +2,4 @@ # incompatible with the verify-std repo. [kani] -commit = "53a7a3b516c5cbe504378d4a208d07d49b5aa4ec" +commit = "fade7057e845eb5e5c6c9bc00a6d72b338765683" From 9e522b600f4cacd6eaf1aff99c71666e24aab9f3 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 10 Aug 2025 20:24:35 +0000 Subject: [PATCH 20/20] Update Kani Metrics (#454) This is an automated PR to update Kani metrics. The metrics have been updated by running `./scripts/run-kani.sh --run metrics`. Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> --- .../kani-std-analysis/metrics-data-core.json | 22 +++++++++++++++++++ .../kani-std-analysis/metrics-data-std.json | 22 +++++++++++++++++++ 2 files changed, 44 insertions(+) diff --git a/scripts/kani-std-analysis/metrics-data-core.json b/scripts/kani-std-analysis/metrics-data-core.json index 07873816a5e41..aa41626e0821b 100644 --- a/scripts/kani-std-analysis/metrics-data-core.json +++ b/scripts/kani-std-analysis/metrics-data-core.json @@ -556,6 +556,28 @@ "verified_safe_fns_under_contract": 111, "verified_safe_fns_with_loop_under_contract": 0, "total_functions_under_contract_all_crates": 381 + }, + { + "date": "2025-08-10", + "total_unsafe_fns": 7194, + "total_unsafe_fns_with_loop": 22, + "total_safe_abstractions": 1861, + "total_safe_abstractions_with_loop": 65, + "total_safe_fns": 15898, + "total_safe_fns_with_loop": 727, + "unsafe_fns_under_contract": 279, + "unsafe_fns_with_loop_under_contract": 3, + "verified_unsafe_fns_under_contract": 248, + "verified_unsafe_fns_with_loop_under_contract": 1, + "safe_abstractions_under_contract": 77, + "safe_abstractions_with_loop_under_contract": 0, + "verified_safe_abstractions_under_contract": 77, + "verified_safe_abstractions_with_loop_under_contract": 0, + "safe_fns_under_contract": 114, + "safe_fns_with_loop_under_contract": 0, + "verified_safe_fns_under_contract": 111, + "verified_safe_fns_with_loop_under_contract": 0, + "total_functions_under_contract_all_crates": 411 } ] } \ No newline at end of file diff --git a/scripts/kani-std-analysis/metrics-data-std.json b/scripts/kani-std-analysis/metrics-data-std.json index a76c713f1b9ad..49dfd01c269b1 100644 --- a/scripts/kani-std-analysis/metrics-data-std.json +++ b/scripts/kani-std-analysis/metrics-data-std.json @@ -439,6 +439,28 @@ "verified_safe_fns_under_contract": 0, "verified_safe_fns_with_loop_under_contract": 0, "total_functions_under_contract_all_crates": 381 + }, + { + "date": "2025-08-10", + "total_unsafe_fns": 183, + "total_unsafe_fns_with_loop": 12, + "total_safe_abstractions": 504, + "total_safe_abstractions_with_loop": 47, + "total_safe_fns": 4169, + "total_safe_fns_with_loop": 190, + "unsafe_fns_under_contract": 9, + "unsafe_fns_with_loop_under_contract": 0, + "verified_unsafe_fns_under_contract": 7, + "verified_unsafe_fns_with_loop_under_contract": 0, + "safe_abstractions_under_contract": 0, + "safe_abstractions_with_loop_under_contract": 0, + "verified_safe_abstractions_under_contract": 0, + "verified_safe_abstractions_with_loop_under_contract": 0, + "safe_fns_under_contract": 0, + "safe_fns_with_loop_under_contract": 0, + 
"verified_safe_fns_under_contract": 0, + "verified_safe_fns_with_loop_under_contract": 0, + "total_functions_under_contract_all_crates": 411 } ] } \ No newline at end of file