diff --git a/Cargo.lock b/Cargo.lock index 4d1c6d4c46b23..5289e040920e6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -128,9 +128,9 @@ dependencies = [ [[package]] name = "anstyle-svg" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc03a770ef506fe1396c0e476120ac0e6523cf14b74218dd5f18cd6833326fa9" +checksum = "26b9ec8c976eada1b0f9747a3d7cc4eae3bef10613e443746e7487f26c872fde" dependencies = [ "anstyle", "anstyle-lossy", @@ -674,7 +674,7 @@ checksum = "fe6d2e5af09e8c8ad56c969f2157a3d4238cebc7c55f0a517728c38f7b200f81" dependencies = [ "serde", "termcolor", - "unicode-width 0.1.14", + "unicode-width 0.2.1", ] [[package]] diff --git a/bootstrap.example.toml b/bootstrap.example.toml index 51529751dd58f..0cd571134ef15 100644 --- a/bootstrap.example.toml +++ b/bootstrap.example.toml @@ -325,6 +325,9 @@ # Defaults to the Python interpreter used to execute x.py. #build.python = "python" +# The path to (or name of) the resource compiler executable to use on Windows. +#build.windows-rc = "rc.exe" + # The path to the REUSE executable to use. Note that REUSE is not required in # most cases, as our tooling relies on a cached (and shrunk) copy of the # REUSE output present in the git repository and in our source tarballs. diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs index 8461c8b03d5a4..45c5c9aa5514d 100644 --- a/compiler/rustc_codegen_llvm/src/llvm_util.rs +++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs @@ -217,27 +217,16 @@ impl<'a> IntoIterator for LLVMFeature<'a> { /// Rust can also be build with an external precompiled version of LLVM which might lead to failures /// if the oldest tested / supported LLVM version doesn't yet support the relevant intrinsics. 
pub(crate) fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> Option<LLVMFeature<'a>> { - let arch = if sess.target.arch == "x86_64" { - "x86" - } else if sess.target.arch == "arm64ec" { - "aarch64" - } else if sess.target.arch == "sparc64" { - "sparc" - } else if sess.target.arch == "powerpc64" { - "powerpc" - } else { - &*sess.target.arch + let raw_arch = &*sess.target.arch; + let arch = match raw_arch { + "x86_64" => "x86", + "arm64ec" => "aarch64", + "sparc64" => "sparc", + "powerpc64" => "powerpc", + _ => raw_arch, }; + let (major, _, _) = get_version(); match (arch, s) { - ("x86", "sse4.2") => Some(LLVMFeature::with_dependencies( - "sse4.2", - smallvec![TargetFeatureFoldStrength::EnableOnly("crc32")], - )), - ("x86", "pclmulqdq") => Some(LLVMFeature::new("pclmul")), - ("x86", "rdrand") => Some(LLVMFeature::new("rdrnd")), - ("x86", "bmi1") => Some(LLVMFeature::new("bmi")), - ("x86", "cmpxchg16b") => Some(LLVMFeature::new("cx16")), - ("x86", "lahfsahf") => Some(LLVMFeature::new("sahf")), ("aarch64", "rcpc2") => Some(LLVMFeature::new("rcpc-immo")), ("aarch64", "dpb") => Some(LLVMFeature::new("ccpp")), ("aarch64", "dpb2") => Some(LLVMFeature::new("ccdp")), @@ -260,14 +249,23 @@ pub(crate) fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> Option<LLVMFeature<'a>> { ("aarch64", "fpmr") => None, // only existed in 18 ("arm", "fp16") => Some(LLVMFeature::new("fullfp16")), // Filter out features that are not supported by the current LLVM version - ("loongarch32" | "loongarch64", "32s") if get_version().0 < 21 => None, + ("loongarch32" | "loongarch64", "32s") if major < 21 => None, + ("powerpc", "power8-crypto") => Some(LLVMFeature::new("crypto")), + ("sparc", "leoncasa") => Some(LLVMFeature::new("hasleoncasa")), + ("x86", "sse4.2") => Some(LLVMFeature::with_dependencies( + "sse4.2", + smallvec![TargetFeatureFoldStrength::EnableOnly("crc32")], + )), + ("x86", "pclmulqdq") => Some(LLVMFeature::new("pclmul")), + ("x86", "rdrand") => Some(LLVMFeature::new("rdrnd")), + ("x86", "bmi1") => Some(LLVMFeature::new("bmi")), + ("x86", "cmpxchg16b") => Some(LLVMFeature::new("cx16")), + ("x86", "lahfsahf") => Some(LLVMFeature::new("sahf")), // Enable the evex512 target feature if an avx512 target feature is enabled.
("x86", s) if s.starts_with("avx512") => Some(LLVMFeature::with_dependencies( s, smallvec![TargetFeatureFoldStrength::EnableOnly("evex512")], )), - ("sparc", "leoncasa") => Some(LLVMFeature::new("hasleoncasa")), - ("powerpc", "power8-crypto") => Some(LLVMFeature::new("crypto")), ("x86", "avx10.1") => Some(LLVMFeature::new("avx10.1-512")), ("x86", "avx10.2") => Some(LLVMFeature::new("avx10.2-512")), ("x86", "apxf") => Some(LLVMFeature::with_dependencies( diff --git a/compiler/rustc_codegen_ssa/messages.ftl b/compiler/rustc_codegen_ssa/messages.ftl index 1dd65d38a2be7..91c3806df4c34 100644 --- a/compiler/rustc_codegen_ssa/messages.ftl +++ b/compiler/rustc_codegen_ssa/messages.ftl @@ -8,8 +8,6 @@ codegen_ssa_aix_strip_not_used = using host's `strip` binary to cross-compile to codegen_ssa_archive_build_failure = failed to build archive at `{$path}`: {$error} -codegen_ssa_autodiff_without_lto = using the autodiff feature requires using fat-lto - codegen_ssa_bare_instruction_set = `#[instruction_set]` requires an argument codegen_ssa_binary_output_to_tty = option `-o` or `--emit` is used to write binary output type `{$shorthand}` to stdout, but stdout is a tty diff --git a/compiler/rustc_codegen_ssa/src/errors.rs b/compiler/rustc_codegen_ssa/src/errors.rs index fb5a82051405d..d5c30c5c7a6b0 100644 --- a/compiler/rustc_codegen_ssa/src/errors.rs +++ b/compiler/rustc_codegen_ssa/src/errors.rs @@ -37,10 +37,6 @@ pub(crate) struct CguNotRecorded<'a> { pub cgu_name: &'a str, } -#[derive(Diagnostic)] -#[diag(codegen_ssa_autodiff_without_lto)] -pub struct AutodiffWithoutLto; - #[derive(Diagnostic)] #[diag(codegen_ssa_unknown_reuse_kind)] pub(crate) struct UnknownReuseKind { diff --git a/compiler/rustc_next_trait_solver/src/canonicalizer.rs b/compiler/rustc_next_trait_solver/src/canonical/canonicalizer.rs similarity index 99% rename from compiler/rustc_next_trait_solver/src/canonicalizer.rs rename to compiler/rustc_next_trait_solver/src/canonical/canonicalizer.rs index 4b4ec4956eb88..b25671d676b9c 100644 --- a/compiler/rustc_next_trait_solver/src/canonicalizer.rs +++ b/compiler/rustc_next_trait_solver/src/canonical/canonicalizer.rs @@ -57,7 +57,7 @@ enum CanonicalizeMode { }, } -pub struct Canonicalizer<'a, D: SolverDelegate, I: Interner> { +pub(super) struct Canonicalizer<'a, D: SolverDelegate, I: Interner> { delegate: &'a D, // Immutable field. @@ -83,7 +83,7 @@ pub struct Canonicalizer<'a, D: SolverDelegate, I: Interner> { } impl<'a, D: SolverDelegate, I: Interner> Canonicalizer<'a, D, I> { - pub fn canonicalize_response>( + pub(super) fn canonicalize_response>( delegate: &'a D, max_input_universe: ty::UniverseIndex, variables: &'a mut Vec, @@ -112,7 +112,6 @@ impl<'a, D: SolverDelegate, I: Interner> Canonicalizer<'a, D, I> { let (max_universe, variables) = canonicalizer.finalize(); Canonical { max_universe, variables, value } } - fn canonicalize_param_env( delegate: &'a D, variables: &'a mut Vec, @@ -195,7 +194,7 @@ impl<'a, D: SolverDelegate, I: Interner> Canonicalizer<'a, D, I> { /// /// We want to keep the option of canonicalizing `'static` to an existential /// variable in the future by changing the way we detect global where-bounds. 
- pub fn canonicalize_input<P: TypeFoldable<I>>( + pub(super) fn canonicalize_input<P: TypeFoldable<I>>( delegate: &'a D, variables: &'a mut Vec<I::GenericArg>, input: QueryInput<I, P>, diff --git a/compiler/rustc_next_trait_solver/src/canonical/mod.rs b/compiler/rustc_next_trait_solver/src/canonical/mod.rs new file mode 100644 index 0000000000000..e3520e238ed37 --- /dev/null +++ b/compiler/rustc_next_trait_solver/src/canonical/mod.rs @@ -0,0 +1,364 @@ +//! Canonicalization is used to separate some goal from its context, +//! throwing away unnecessary information in the process. +//! +//! This is necessary to cache goals containing inference variables +//! and placeholders without restricting them to the current `InferCtxt`. +//! +//! Canonicalization is fairly involved, for more details see the relevant +//! section of the [rustc-dev-guide][c]. +//! +//! [c]: https://rustc-dev-guide.rust-lang.org/solve/canonicalization.html + +use std::iter; + +use canonicalizer::Canonicalizer; + use rustc_index::IndexVec; +use rustc_type_ir::inherent::*; +use rustc_type_ir::relate::solver_relating::RelateExt; +use rustc_type_ir::{ + self as ty, Canonical, CanonicalVarKind, CanonicalVarValues, InferCtxtLike, Interner, + TypeFoldable, +}; +use tracing::instrument; + +use crate::delegate::SolverDelegate; +use crate::resolve::eager_resolve_vars; +use crate::solve::{ + CanonicalInput, CanonicalResponse, Certainty, ExternalConstraintsData, Goal, + NestedNormalizationGoals, PredefinedOpaquesData, QueryInput, Response, inspect, +}; + +pub mod canonicalizer; + +trait ResponseT<I: Interner> { + fn var_values(&self) -> CanonicalVarValues<I>; +} + +impl<I: Interner> ResponseT<I> for Response<I> { + fn var_values(&self) -> CanonicalVarValues<I> { + self.var_values + } +} + +impl<I: Interner, T> ResponseT<I> for inspect::State<I, T> { + fn var_values(&self) -> CanonicalVarValues<I> { + self.var_values + } +} + +/// Canonicalizes the goal remembering the original values +/// for each bound variable. +/// +/// This expects `goal` and `opaque_types` to be eagerly resolved. +pub(super) fn canonicalize_goal<D, I>( + delegate: &D, + goal: Goal<I, I::Predicate>, + opaque_types: Vec<(ty::OpaqueTypeKey<I>, I::Ty)>, +) -> (Vec<I::GenericArg>, CanonicalInput<I>) +where + D: SolverDelegate<Interner = I>, + I: Interner, +{ + let mut orig_values = Default::default(); + let canonical = Canonicalizer::canonicalize_input( + delegate, + &mut orig_values, + QueryInput { + goal, + predefined_opaques_in_body: delegate + .cx() + .mk_predefined_opaques_in_body(PredefinedOpaquesData { opaque_types }), + }, + ); + let query_input = ty::CanonicalQueryInput { canonical, typing_mode: delegate.typing_mode() }; + (orig_values, query_input) +} + +pub(super) fn canonicalize_response<D, I, T>( + delegate: &D, + max_input_universe: ty::UniverseIndex, + value: T, +) -> ty::Canonical<I, T> +where + D: SolverDelegate<Interner = I>, + I: Interner, + T: TypeFoldable<I>, +{ + let mut orig_values = Default::default(); + let canonical = + Canonicalizer::canonicalize_response(delegate, max_input_universe, &mut orig_values, value); + canonical +} + +/// After calling a canonical query, we apply the constraints returned +/// by the query using this function.
+/// +/// This happens in three steps: +/// - we instantiate the bound variables of the query response +/// - we unify the `var_values` of the response with the `original_values` +/// - we apply the `external_constraints` returned by the query, returning +/// the `normalization_nested_goals` +pub(super) fn instantiate_and_apply_query_response<D, I>( + delegate: &D, + param_env: I::ParamEnv, + original_values: &[I::GenericArg], + response: CanonicalResponse<I>, + span: I::Span, +) -> (NestedNormalizationGoals<I>, Certainty) +where + D: SolverDelegate<Interner = I>, + I: Interner, +{ + let instantiation = + compute_query_response_instantiation_values(delegate, &original_values, &response, span); + + let Response { var_values, external_constraints, certainty } = + delegate.instantiate_canonical(response, instantiation); + + unify_query_var_values(delegate, param_env, &original_values, var_values, span); + + let ExternalConstraintsData { region_constraints, opaque_types, normalization_nested_goals } = + &*external_constraints; + + register_region_constraints(delegate, region_constraints, span); + register_new_opaque_types(delegate, opaque_types, span); + + (normalization_nested_goals.clone(), certainty) +} + +/// This returns the canonical variable values to instantiate the bound variables of +/// the canonical response. This depends on the `original_values` for the +/// bound variables. +fn compute_query_response_instantiation_values<D, I, T>( + delegate: &D, + original_values: &[I::GenericArg], + response: &Canonical<I, T>, + span: I::Span, +) -> CanonicalVarValues<I> +where + D: SolverDelegate<Interner = I>, + I: Interner, + T: ResponseT<I>, +{ + // FIXME: Longterm canonical queries should deal with all placeholders + // created inside of the query directly instead of returning them to the + // caller. + let prev_universe = delegate.universe(); + let universes_created_in_query = response.max_universe.index(); + for _ in 0..universes_created_in_query { + delegate.create_next_universe(); + } + + let var_values = response.value.var_values(); + assert_eq!(original_values.len(), var_values.len()); + + // If the query did not make progress with constraining inference variables, + // we would normally create a new inference variable for bound existential variables + // and only then unify this new inference variable with the inference variable from + // the input. + // + // We therefore instantiate the existential variable in the canonical response with the + // inference variable of the input right away, which is more performant. + let mut opt_values = IndexVec::from_elem_n(None, response.variables.len()); + for (original_value, result_value) in iter::zip(original_values, var_values.var_values.iter()) { + match result_value.kind() { + ty::GenericArgKind::Type(t) => { + // We disable the instantiation guess for inference variables + // and only use it for placeholders. We need to handle the + // `sub_root` of type inference variables which would make this + // more involved. They are also a lot rarer than region variables. + if let ty::Bound(debruijn, b) = t.kind() + && !matches!( + response.variables.get(b.var().as_usize()).unwrap(), + CanonicalVarKind::Ty { ..
} + ) + { + assert_eq!(debruijn, ty::INNERMOST); + opt_values[b.var()] = Some(*original_value); + } + } + ty::GenericArgKind::Lifetime(r) => { + if let ty::ReBound(debruijn, br) = r.kind() { + assert_eq!(debruijn, ty::INNERMOST); + opt_values[br.var()] = Some(*original_value); + } + } + ty::GenericArgKind::Const(c) => { + if let ty::ConstKind::Bound(debruijn, bv) = c.kind() { + assert_eq!(debruijn, ty::INNERMOST); + opt_values[bv.var()] = Some(*original_value); + } + } + } + } + CanonicalVarValues::instantiate(delegate.cx(), response.variables, |var_values, kind| { + if kind.universe() != ty::UniverseIndex::ROOT { + // A variable from inside a binder of the query. While ideally these shouldn't + // exist at all (see the FIXME at the start of this method), we have to deal with + // them for now. + delegate.instantiate_canonical_var(kind, span, &var_values, |idx| { + prev_universe + idx.index() + }) + } else if kind.is_existential() { + // As an optimization we sometimes avoid creating a new inference variable here. + // + // All new inference variables we create start out in the current universe of the caller. + // This is conceptually wrong as these inference variables would be able to name + // more placeholders then they should be able to. However the inference variables have + // to "come from somewhere", so by equating them with the original values of the caller + // later on, we pull them down into their correct universe again. + if let Some(v) = opt_values[ty::BoundVar::from_usize(var_values.len())] { + v + } else { + delegate.instantiate_canonical_var(kind, span, &var_values, |_| prev_universe) + } + } else { + // For placeholders which were already part of the input, we simply map this + // universal bound variable back the placeholder of the input. + original_values[kind.expect_placeholder_index()] + } + }) +} + +/// Unify the `original_values` with the `var_values` returned by the canonical query.. +/// +/// This assumes that this unification will always succeed. This is the case when +/// applying a query response right away. However, calling a canonical query, doing any +/// other kind of trait solving, and only then instantiating the result of the query +/// can cause the instantiation to fail. This is not supported and we ICE in this case. +/// +/// We always structurally instantiate aliases. Relating aliases needs to be different +/// depending on whether the alias is *rigid* or not. We're only really able to tell +/// whether an alias is rigid by using the trait solver. When instantiating a response +/// from the solver we assume that the solver correctly handled aliases and therefore +/// always relate them structurally here. 
+#[instrument(level = "trace", skip(delegate))] +fn unify_query_var_values( + delegate: &D, + param_env: I::ParamEnv, + original_values: &[I::GenericArg], + var_values: CanonicalVarValues, + span: I::Span, +) where + D: SolverDelegate, + I: Interner, +{ + assert_eq!(original_values.len(), var_values.len()); + + for (&orig, response) in iter::zip(original_values, var_values.var_values.iter()) { + let goals = + delegate.eq_structurally_relating_aliases(param_env, orig, response, span).unwrap(); + assert!(goals.is_empty()); + } +} + +fn register_region_constraints( + delegate: &D, + outlives: &[ty::OutlivesPredicate], + span: I::Span, +) where + D: SolverDelegate, + I: Interner, +{ + for &ty::OutlivesPredicate(lhs, rhs) in outlives { + match lhs.kind() { + ty::GenericArgKind::Lifetime(lhs) => delegate.sub_regions(rhs, lhs, span), + ty::GenericArgKind::Type(lhs) => delegate.register_ty_outlives(lhs, rhs, span), + ty::GenericArgKind::Const(_) => panic!("const outlives: {lhs:?}: {rhs:?}"), + } + } +} + +fn register_new_opaque_types( + delegate: &D, + opaque_types: &[(ty::OpaqueTypeKey, I::Ty)], + span: I::Span, +) where + D: SolverDelegate, + I: Interner, +{ + for &(key, ty) in opaque_types { + let prev = delegate.register_hidden_type_in_storage(key, ty, span); + // We eagerly resolve inference variables when computing the query response. + // This can cause previously distinct opaque type keys to now be structurally equal. + // + // To handle this, we store any duplicate entries in a separate list to check them + // at the end of typeck/borrowck. We could alternatively eagerly equate the hidden + // types here. However, doing so is difficult as it may result in nested goals and + // any errors may make it harder to track the control flow for diagnostics. + if let Some(prev) = prev { + delegate.add_duplicate_opaque_type(key, prev, span); + } + } +} + +/// Used by proof trees to be able to recompute intermediate actions while +/// evaluating a goal. The `var_values` not only include the bound variables +/// of the query input, but also contain all unconstrained inference vars +/// created while evaluating this goal. +pub fn make_canonical_state( + delegate: &D, + var_values: &[I::GenericArg], + max_input_universe: ty::UniverseIndex, + data: T, +) -> inspect::CanonicalState +where + D: SolverDelegate, + I: Interner, + T: TypeFoldable, +{ + let var_values = CanonicalVarValues { var_values: delegate.cx().mk_args(var_values) }; + let state = inspect::State { var_values, data }; + let state = eager_resolve_vars(delegate, state); + Canonicalizer::canonicalize_response(delegate, max_input_universe, &mut vec![], state) +} + +// FIXME: needs to be pub to be accessed by downstream +// `rustc_trait_selection::solve::inspect::analyse`. +pub fn instantiate_canonical_state( + delegate: &D, + span: I::Span, + param_env: I::ParamEnv, + orig_values: &mut Vec, + state: inspect::CanonicalState, +) -> T +where + D: SolverDelegate, + I: Interner, + T: TypeFoldable, +{ + // In case any fresh inference variables have been created between `state` + // and the previous instantiation, extend `orig_values` for it. + orig_values.extend( + state.value.var_values.var_values.as_slice()[orig_values.len()..] 
+ .iter() + .map(|&arg| delegate.fresh_var_for_kind_with_span(arg, span)), + ); + + let instantiation = + compute_query_response_instantiation_values(delegate, orig_values, &state, span); + + let inspect::State { var_values, data } = delegate.instantiate_canonical(state, instantiation); + + unify_query_var_values(delegate, param_env, orig_values, var_values, span); + data +} + +pub fn response_no_constraints_raw( + cx: I, + max_universe: ty::UniverseIndex, + variables: I::CanonicalVarKinds, + certainty: Certainty, +) -> CanonicalResponse { + ty::Canonical { + max_universe, + variables, + value: Response { + var_values: ty::CanonicalVarValues::make_identity(cx, variables), + // FIXME: maybe we should store the "no response" version in cx, like + // we do for cx.types and stuff. + external_constraints: cx.mk_external_constraints(ExternalConstraintsData::default()), + certainty, + }, + } +} diff --git a/compiler/rustc_next_trait_solver/src/lib.rs b/compiler/rustc_next_trait_solver/src/lib.rs index d3965e14c68a8..5fa29b7d9f813 100644 --- a/compiler/rustc_next_trait_solver/src/lib.rs +++ b/compiler/rustc_next_trait_solver/src/lib.rs @@ -10,7 +10,7 @@ #![allow(rustc::usage_of_type_ir_traits)] // tidy-alphabetical-end -pub mod canonicalizer; +pub mod canonical; pub mod coherence; pub mod delegate; pub mod placeholder; diff --git a/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/canonical.rs b/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/canonical.rs deleted file mode 100644 index 889588afe61ff..0000000000000 --- a/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/canonical.rs +++ /dev/null @@ -1,517 +0,0 @@ -//! Canonicalization is used to separate some goal from its context, -//! throwing away unnecessary information in the process. -//! -//! This is necessary to cache goals containing inference variables -//! and placeholders without restricting them to the current `InferCtxt`. -//! -//! Canonicalization is fairly involved, for more details see the relevant -//! section of the [rustc-dev-guide][c]. -//! -//! [c]: https://rustc-dev-guide.rust-lang.org/solve/canonicalization.html - -use std::iter; - -use rustc_index::IndexVec; -use rustc_type_ir::data_structures::HashSet; -use rustc_type_ir::inherent::*; -use rustc_type_ir::relate::solver_relating::RelateExt; -use rustc_type_ir::solve::OpaqueTypesJank; -use rustc_type_ir::{ - self as ty, Canonical, CanonicalVarKind, CanonicalVarValues, InferCtxtLike, Interner, - TypeFoldable, -}; -use tracing::{debug, instrument, trace}; - -use crate::canonicalizer::Canonicalizer; -use crate::delegate::SolverDelegate; -use crate::resolve::eager_resolve_vars; -use crate::solve::eval_ctxt::CurrentGoalKind; -use crate::solve::{ - CanonicalInput, CanonicalResponse, Certainty, EvalCtxt, ExternalConstraintsData, Goal, - MaybeCause, NestedNormalizationGoals, NoSolution, PredefinedOpaquesData, QueryInput, - QueryResult, Response, inspect, response_no_constraints_raw, -}; - -trait ResponseT { - fn var_values(&self) -> CanonicalVarValues; -} - -impl ResponseT for Response { - fn var_values(&self) -> CanonicalVarValues { - self.var_values - } -} - -impl ResponseT for inspect::State { - fn var_values(&self) -> CanonicalVarValues { - self.var_values - } -} - -impl EvalCtxt<'_, D> -where - D: SolverDelegate, - I: Interner, -{ - /// Canonicalizes the goal remembering the original values - /// for each bound variable. - /// - /// This expects `goal` and `opaque_types` to be eager resolved. 
- pub(super) fn canonicalize_goal( - delegate: &D, - goal: Goal, - opaque_types: Vec<(ty::OpaqueTypeKey, I::Ty)>, - ) -> (Vec, CanonicalInput) { - let mut orig_values = Default::default(); - let canonical = Canonicalizer::canonicalize_input( - delegate, - &mut orig_values, - QueryInput { - goal, - predefined_opaques_in_body: delegate - .cx() - .mk_predefined_opaques_in_body(PredefinedOpaquesData { opaque_types }), - }, - ); - let query_input = - ty::CanonicalQueryInput { canonical, typing_mode: delegate.typing_mode() }; - (orig_values, query_input) - } - - /// To return the constraints of a canonical query to the caller, we canonicalize: - /// - /// - `var_values`: a map from bound variables in the canonical goal to - /// the values inferred while solving the instantiated goal. - /// - `external_constraints`: additional constraints which aren't expressible - /// using simple unification of inference variables. - /// - /// This takes the `shallow_certainty` which represents whether we're confident - /// that the final result of the current goal only depends on the nested goals. - /// - /// In case this is `Certainty::Maybe`, there may still be additional nested goals - /// or inference constraints required for this candidate to be hold. The candidate - /// always requires all already added constraints and nested goals. - #[instrument(level = "trace", skip(self), ret)] - pub(in crate::solve) fn evaluate_added_goals_and_make_canonical_response( - &mut self, - shallow_certainty: Certainty, - ) -> QueryResult { - self.inspect.make_canonical_response(shallow_certainty); - - let goals_certainty = self.try_evaluate_added_goals()?; - assert_eq!( - self.tainted, - Ok(()), - "EvalCtxt is tainted -- nested goals may have been dropped in a \ - previous call to `try_evaluate_added_goals!`" - ); - - // We only check for leaks from universes which were entered inside - // of the query. - self.delegate.leak_check(self.max_input_universe).map_err(|NoSolution| { - trace!("failed the leak check"); - NoSolution - })?; - - let (certainty, normalization_nested_goals) = - match (self.current_goal_kind, shallow_certainty) { - // When normalizing, we've replaced the expected term with an unconstrained - // inference variable. This means that we dropped information which could - // have been important. We handle this by instead returning the nested goals - // to the caller, where they are then handled. We only do so if we do not - // need to recompute the `NormalizesTo` goal afterwards to avoid repeatedly - // uplifting its nested goals. This is the case if the `shallow_certainty` is - // `Certainty::Yes`. - (CurrentGoalKind::NormalizesTo, Certainty::Yes) => { - let goals = std::mem::take(&mut self.nested_goals); - // As we return all ambiguous nested goals, we can ignore the certainty - // returned by `self.try_evaluate_added_goals()`. - if goals.is_empty() { - assert!(matches!(goals_certainty, Certainty::Yes)); - } - ( - Certainty::Yes, - NestedNormalizationGoals( - goals.into_iter().map(|(s, g, _)| (s, g)).collect(), - ), - ) - } - _ => { - let certainty = shallow_certainty.and(goals_certainty); - (certainty, NestedNormalizationGoals::empty()) - } - }; - - if let Certainty::Maybe { - cause: cause @ MaybeCause::Overflow { keep_constraints: false, .. 
}, - opaque_types_jank, - } = certainty - { - // If we have overflow, it's probable that we're substituting a type - // into itself infinitely and any partial substitutions in the query - // response are probably not useful anyways, so just return an empty - // query response. - // - // This may prevent us from potentially useful inference, e.g. - // 2 candidates, one ambiguous and one overflow, which both - // have the same inference constraints. - // - // Changing this to retain some constraints in the future - // won't be a breaking change, so this is good enough for now. - return Ok(self.make_ambiguous_response_no_constraints(cause, opaque_types_jank)); - } - - let external_constraints = - self.compute_external_query_constraints(certainty, normalization_nested_goals); - let (var_values, mut external_constraints) = - eager_resolve_vars(self.delegate, (self.var_values, external_constraints)); - - // Remove any trivial or duplicated region constraints once we've resolved regions - let mut unique = HashSet::default(); - external_constraints.region_constraints.retain(|outlives| { - outlives.0.as_region().is_none_or(|re| re != outlives.1) && unique.insert(*outlives) - }); - - let canonical = Canonicalizer::canonicalize_response( - self.delegate, - self.max_input_universe, - &mut Default::default(), - Response { - var_values, - certainty, - external_constraints: self.cx().mk_external_constraints(external_constraints), - }, - ); - - // HACK: We bail with overflow if the response would have too many non-region - // inference variables. This tends to only happen if we encounter a lot of - // ambiguous alias types which get replaced with fresh inference variables - // during generalization. This prevents hangs caused by an exponential blowup, - // see tests/ui/traits/next-solver/coherence-alias-hang.rs. - match self.current_goal_kind { - // We don't do so for `NormalizesTo` goals as we erased the expected term and - // bailing with overflow here would prevent us from detecting a type-mismatch, - // causing a coherence error in diesel, see #131969. We still bail with overflow - // when later returning from the parent AliasRelate goal. - CurrentGoalKind::NormalizesTo => {} - CurrentGoalKind::Misc | CurrentGoalKind::CoinductiveTrait => { - let num_non_region_vars = canonical - .variables - .iter() - .filter(|c| !c.is_region() && c.is_existential()) - .count(); - if num_non_region_vars > self.cx().recursion_limit() { - debug!(?num_non_region_vars, "too many inference variables -> overflow"); - return Ok(self.make_ambiguous_response_no_constraints( - MaybeCause::Overflow { - suggest_increasing_limit: true, - keep_constraints: false, - }, - OpaqueTypesJank::AllGood, - )); - } - } - } - - Ok(canonical) - } - - /// Constructs a totally unconstrained, ambiguous response to a goal. - /// - /// Take care when using this, since often it's useful to respond with - /// ambiguity but return constrained variables to guide inference. - pub(in crate::solve) fn make_ambiguous_response_no_constraints( - &self, - cause: MaybeCause, - opaque_types_jank: OpaqueTypesJank, - ) -> CanonicalResponse { - response_no_constraints_raw( - self.cx(), - self.max_input_universe, - self.variables, - Certainty::Maybe { cause, opaque_types_jank }, - ) - } - - /// Computes the region constraints and *new* opaque types registered when - /// proving a goal. 
- /// - /// If an opaque was already constrained before proving this goal, then the - /// external constraints do not need to record that opaque, since if it is - /// further constrained by inference, that will be passed back in the var - /// values. - #[instrument(level = "trace", skip(self), ret)] - fn compute_external_query_constraints( - &self, - certainty: Certainty, - normalization_nested_goals: NestedNormalizationGoals, - ) -> ExternalConstraintsData { - // We only return region constraints once the certainty is `Yes`. This - // is necessary as we may drop nested goals on ambiguity, which may result - // in unconstrained inference variables in the region constraints. It also - // prevents us from emitting duplicate region constraints, avoiding some - // unnecessary work. This slightly weakens the leak check in case it uses - // region constraints from an ambiguous nested goal. This is tested in both - // `tests/ui/higher-ranked/leak-check/leak-check-in-selection-5-ambig.rs` and - // `tests/ui/higher-ranked/leak-check/leak-check-in-selection-6-ambig-unify.rs`. - let region_constraints = if certainty == Certainty::Yes { - self.delegate.make_deduplicated_outlives_constraints() - } else { - Default::default() - }; - - // We only return *newly defined* opaque types from canonical queries. - // - // Constraints for any existing opaque types are already tracked by changes - // to the `var_values`. - let opaque_types = self - .delegate - .clone_opaque_types_added_since(self.initial_opaque_types_storage_num_entries); - - ExternalConstraintsData { region_constraints, opaque_types, normalization_nested_goals } - } - - /// After calling a canonical query, we apply the constraints returned - /// by the query using this function. - /// - /// This happens in three steps: - /// - we instantiate the bound variables of the query response - /// - we unify the `var_values` of the response with the `original_values` - /// - we apply the `external_constraints` returned by the query, returning - /// the `normalization_nested_goals` - pub(super) fn instantiate_and_apply_query_response( - delegate: &D, - param_env: I::ParamEnv, - original_values: &[I::GenericArg], - response: CanonicalResponse, - span: I::Span, - ) -> (NestedNormalizationGoals, Certainty) { - let instantiation = Self::compute_query_response_instantiation_values( - delegate, - &original_values, - &response, - span, - ); - - let Response { var_values, external_constraints, certainty } = - delegate.instantiate_canonical(response, instantiation); - - Self::unify_query_var_values(delegate, param_env, &original_values, var_values, span); - - let ExternalConstraintsData { - region_constraints, - opaque_types, - normalization_nested_goals, - } = &*external_constraints; - - Self::register_region_constraints(delegate, region_constraints, span); - Self::register_new_opaque_types(delegate, opaque_types, span); - - (normalization_nested_goals.clone(), certainty) - } - - /// This returns the canonical variable values to instantiate the bound variables of - /// the canonical response. This depends on the `original_values` for the - /// bound variables. - fn compute_query_response_instantiation_values>( - delegate: &D, - original_values: &[I::GenericArg], - response: &Canonical, - span: I::Span, - ) -> CanonicalVarValues { - // FIXME: Longterm canonical queries should deal with all placeholders - // created inside of the query directly instead of returning them to the - // caller. 
- let prev_universe = delegate.universe(); - let universes_created_in_query = response.max_universe.index(); - for _ in 0..universes_created_in_query { - delegate.create_next_universe(); - } - - let var_values = response.value.var_values(); - assert_eq!(original_values.len(), var_values.len()); - - // If the query did not make progress with constraining inference variables, - // we would normally create a new inference variables for bound existential variables - // only then unify this new inference variable with the inference variable from - // the input. - // - // We therefore instantiate the existential variable in the canonical response with the - // inference variable of the input right away, which is more performant. - let mut opt_values = IndexVec::from_elem_n(None, response.variables.len()); - for (original_value, result_value) in - iter::zip(original_values, var_values.var_values.iter()) - { - match result_value.kind() { - ty::GenericArgKind::Type(t) => { - // We disable the instantiation guess for inference variables - // and only use it for placeholders. We need to handle the - // `sub_root` of type inference variables which would make this - // more involved. They are also a lot rarer than region variables. - if let ty::Bound(debruijn, b) = t.kind() - && !matches!( - response.variables.get(b.var().as_usize()).unwrap(), - CanonicalVarKind::Ty { .. } - ) - { - assert_eq!(debruijn, ty::INNERMOST); - opt_values[b.var()] = Some(*original_value); - } - } - ty::GenericArgKind::Lifetime(r) => { - if let ty::ReBound(debruijn, br) = r.kind() { - assert_eq!(debruijn, ty::INNERMOST); - opt_values[br.var()] = Some(*original_value); - } - } - ty::GenericArgKind::Const(c) => { - if let ty::ConstKind::Bound(debruijn, bv) = c.kind() { - assert_eq!(debruijn, ty::INNERMOST); - opt_values[bv.var()] = Some(*original_value); - } - } - } - } - CanonicalVarValues::instantiate(delegate.cx(), response.variables, |var_values, kind| { - if kind.universe() != ty::UniverseIndex::ROOT { - // A variable from inside a binder of the query. While ideally these shouldn't - // exist at all (see the FIXME at the start of this method), we have to deal with - // them for now. - delegate.instantiate_canonical_var(kind, span, &var_values, |idx| { - prev_universe + idx.index() - }) - } else if kind.is_existential() { - // As an optimization we sometimes avoid creating a new inference variable here. - // - // All new inference variables we create start out in the current universe of the caller. - // This is conceptually wrong as these inference variables would be able to name - // more placeholders then they should be able to. However the inference variables have - // to "come from somewhere", so by equating them with the original values of the caller - // later on, we pull them down into their correct universe again. - if let Some(v) = opt_values[ty::BoundVar::from_usize(var_values.len())] { - v - } else { - delegate.instantiate_canonical_var(kind, span, &var_values, |_| prev_universe) - } - } else { - // For placeholders which were already part of the input, we simply map this - // universal bound variable back the placeholder of the input. - original_values[kind.expect_placeholder_index()] - } - }) - } - - /// Unify the `original_values` with the `var_values` returned by the canonical query.. - /// - /// This assumes that this unification will always succeed. This is the case when - /// applying a query response right away. 
However, calling a canonical query, doing any - /// other kind of trait solving, and only then instantiating the result of the query - /// can cause the instantiation to fail. This is not supported and we ICE in this case. - /// - /// We always structurally instantiate aliases. Relating aliases needs to be different - /// depending on whether the alias is *rigid* or not. We're only really able to tell - /// whether an alias is rigid by using the trait solver. When instantiating a response - /// from the solver we assume that the solver correctly handled aliases and therefore - /// always relate them structurally here. - #[instrument(level = "trace", skip(delegate))] - fn unify_query_var_values( - delegate: &D, - param_env: I::ParamEnv, - original_values: &[I::GenericArg], - var_values: CanonicalVarValues, - span: I::Span, - ) { - assert_eq!(original_values.len(), var_values.len()); - - for (&orig, response) in iter::zip(original_values, var_values.var_values.iter()) { - let goals = - delegate.eq_structurally_relating_aliases(param_env, orig, response, span).unwrap(); - assert!(goals.is_empty()); - } - } - - fn register_region_constraints( - delegate: &D, - outlives: &[ty::OutlivesPredicate], - span: I::Span, - ) { - for &ty::OutlivesPredicate(lhs, rhs) in outlives { - match lhs.kind() { - ty::GenericArgKind::Lifetime(lhs) => delegate.sub_regions(rhs, lhs, span), - ty::GenericArgKind::Type(lhs) => delegate.register_ty_outlives(lhs, rhs, span), - ty::GenericArgKind::Const(_) => panic!("const outlives: {lhs:?}: {rhs:?}"), - } - } - } - - fn register_new_opaque_types( - delegate: &D, - opaque_types: &[(ty::OpaqueTypeKey, I::Ty)], - span: I::Span, - ) { - for &(key, ty) in opaque_types { - let prev = delegate.register_hidden_type_in_storage(key, ty, span); - // We eagerly resolve inference variables when computing the query response. - // This can cause previously distinct opaque type keys to now be structurally equal. - // - // To handle this, we store any duplicate entries in a separate list to check them - // at the end of typeck/borrowck. We could alternatively eagerly equate the hidden - // types here. However, doing so is difficult as it may result in nested goals and - // any errors may make it harder to track the control flow for diagnostics. - if let Some(prev) = prev { - delegate.add_duplicate_opaque_type(key, prev, span); - } - } - } -} - -/// Used by proof trees to be able to recompute intermediate actions while -/// evaluating a goal. The `var_values` not only include the bound variables -/// of the query input, but also contain all unconstrained inference vars -/// created while evaluating this goal. -pub(in crate::solve) fn make_canonical_state( - delegate: &D, - var_values: &[I::GenericArg], - max_input_universe: ty::UniverseIndex, - data: T, -) -> inspect::CanonicalState -where - D: SolverDelegate, - I: Interner, - T: TypeFoldable, -{ - let var_values = CanonicalVarValues { var_values: delegate.cx().mk_args(var_values) }; - let state = inspect::State { var_values, data }; - let state = eager_resolve_vars(delegate, state); - Canonicalizer::canonicalize_response(delegate, max_input_universe, &mut vec![], state) -} - -// FIXME: needs to be pub to be accessed by downstream -// `rustc_trait_selection::solve::inspect::analyse`. 
-pub fn instantiate_canonical_state>( - delegate: &D, - span: I::Span, - param_env: I::ParamEnv, - orig_values: &mut Vec, - state: inspect::CanonicalState, -) -> T -where - D: SolverDelegate, - I: Interner, -{ - // In case any fresh inference variables have been created between `state` - // and the previous instantiation, extend `orig_values` for it. - orig_values.extend( - state.value.var_values.var_values.as_slice()[orig_values.len()..] - .iter() - .map(|&arg| delegate.fresh_var_for_kind_with_span(arg, span)), - ); - - let instantiation = - EvalCtxt::compute_query_response_instantiation_values(delegate, orig_values, &state, span); - - let inspect::State { var_values, data } = delegate.instantiate_canonical(state, instantiation); - - EvalCtxt::unify_query_var_values(delegate, param_env, orig_values, var_values, span); - data -} diff --git a/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/mod.rs b/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/mod.rs index 5df7c92d88145..bb86357a85f8a 100644 --- a/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/mod.rs +++ b/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/mod.rs @@ -17,6 +17,10 @@ use rustc_type_ir::{ use tracing::{debug, instrument, trace}; use super::has_only_region_constraints; +use crate::canonical::{ + canonicalize_goal, canonicalize_response, instantiate_and_apply_query_response, + response_no_constraints_raw, +}; use crate::coherence; use crate::delegate::SolverDelegate; use crate::placeholder::BoundVarReplacer; @@ -24,12 +28,11 @@ use crate::resolve::eager_resolve_vars; use crate::solve::search_graph::SearchGraph; use crate::solve::ty::may_use_unstable_feature; use crate::solve::{ - CanonicalInput, Certainty, FIXPOINT_STEP_LIMIT, Goal, GoalEvaluation, GoalSource, - GoalStalledOn, HasChanged, NestedNormalizationGoals, NoSolution, QueryInput, QueryResult, - inspect, + CanonicalInput, CanonicalResponse, Certainty, ExternalConstraintsData, FIXPOINT_STEP_LIMIT, + Goal, GoalEvaluation, GoalSource, GoalStalledOn, HasChanged, MaybeCause, + NestedNormalizationGoals, NoSolution, QueryInput, QueryResult, Response, inspect, }; -pub(super) mod canonical; mod probe; /// The kind of goal we're currently proving. @@ -464,8 +467,7 @@ where let opaque_types = self.delegate.clone_opaque_types_lookup_table(); let (goal, opaque_types) = eager_resolve_vars(self.delegate, (goal, opaque_types)); - let (orig_values, canonical_goal) = - Self::canonicalize_goal(self.delegate, goal, opaque_types); + let (orig_values, canonical_goal) = canonicalize_goal(self.delegate, goal, opaque_types); let canonical_result = self.search_graph.evaluate_goal( self.cx(), canonical_goal, @@ -480,7 +482,7 @@ where let has_changed = if !has_only_region_constraints(response) { HasChanged::Yes } else { HasChanged::No }; - let (normalization_nested_goals, certainty) = Self::instantiate_and_apply_query_response( + let (normalization_nested_goals, certainty) = instantiate_and_apply_query_response( self.delegate, goal.param_env, &orig_values, @@ -1223,6 +1225,198 @@ where vec![] } } + + /// To return the constraints of a canonical query to the caller, we canonicalize: + /// + /// - `var_values`: a map from bound variables in the canonical goal to + /// the values inferred while solving the instantiated goal. + /// - `external_constraints`: additional constraints which aren't expressible + /// using simple unification of inference variables. 
+ /// + /// This takes the `shallow_certainty` which represents whether we're confident + /// that the final result of the current goal only depends on the nested goals. + /// + /// In case this is `Certainty::Maybe`, there may still be additional nested goals + /// or inference constraints required for this candidate to be hold. The candidate + /// always requires all already added constraints and nested goals. + #[instrument(level = "trace", skip(self), ret)] + pub(in crate::solve) fn evaluate_added_goals_and_make_canonical_response( + &mut self, + shallow_certainty: Certainty, + ) -> QueryResult { + self.inspect.make_canonical_response(shallow_certainty); + + let goals_certainty = self.try_evaluate_added_goals()?; + assert_eq!( + self.tainted, + Ok(()), + "EvalCtxt is tainted -- nested goals may have been dropped in a \ + previous call to `try_evaluate_added_goals!`" + ); + + // We only check for leaks from universes which were entered inside + // of the query. + self.delegate.leak_check(self.max_input_universe).map_err(|NoSolution| { + trace!("failed the leak check"); + NoSolution + })?; + + let (certainty, normalization_nested_goals) = + match (self.current_goal_kind, shallow_certainty) { + // When normalizing, we've replaced the expected term with an unconstrained + // inference variable. This means that we dropped information which could + // have been important. We handle this by instead returning the nested goals + // to the caller, where they are then handled. We only do so if we do not + // need to recompute the `NormalizesTo` goal afterwards to avoid repeatedly + // uplifting its nested goals. This is the case if the `shallow_certainty` is + // `Certainty::Yes`. + (CurrentGoalKind::NormalizesTo, Certainty::Yes) => { + let goals = std::mem::take(&mut self.nested_goals); + // As we return all ambiguous nested goals, we can ignore the certainty + // returned by `self.try_evaluate_added_goals()`. + if goals.is_empty() { + assert!(matches!(goals_certainty, Certainty::Yes)); + } + ( + Certainty::Yes, + NestedNormalizationGoals( + goals.into_iter().map(|(s, g, _)| (s, g)).collect(), + ), + ) + } + _ => { + let certainty = shallow_certainty.and(goals_certainty); + (certainty, NestedNormalizationGoals::empty()) + } + }; + + if let Certainty::Maybe { + cause: cause @ MaybeCause::Overflow { keep_constraints: false, .. }, + opaque_types_jank, + } = certainty + { + // If we have overflow, it's probable that we're substituting a type + // into itself infinitely and any partial substitutions in the query + // response are probably not useful anyways, so just return an empty + // query response. + // + // This may prevent us from potentially useful inference, e.g. + // 2 candidates, one ambiguous and one overflow, which both + // have the same inference constraints. + // + // Changing this to retain some constraints in the future + // won't be a breaking change, so this is good enough for now. 
+ return Ok(self.make_ambiguous_response_no_constraints(cause, opaque_types_jank)); + } + + let external_constraints = + self.compute_external_query_constraints(certainty, normalization_nested_goals); + let (var_values, mut external_constraints) = + eager_resolve_vars(self.delegate, (self.var_values, external_constraints)); + + // Remove any trivial or duplicated region constraints once we've resolved regions + let mut unique = HashSet::default(); + external_constraints.region_constraints.retain(|outlives| { + outlives.0.as_region().is_none_or(|re| re != outlives.1) && unique.insert(*outlives) + }); + + let canonical = canonicalize_response( + self.delegate, + self.max_input_universe, + Response { + var_values, + certainty, + external_constraints: self.cx().mk_external_constraints(external_constraints), + }, + ); + + // HACK: We bail with overflow if the response would have too many non-region + // inference variables. This tends to only happen if we encounter a lot of + // ambiguous alias types which get replaced with fresh inference variables + // during generalization. This prevents hangs caused by an exponential blowup, + // see tests/ui/traits/next-solver/coherence-alias-hang.rs. + match self.current_goal_kind { + // We don't do so for `NormalizesTo` goals as we erased the expected term and + // bailing with overflow here would prevent us from detecting a type-mismatch, + // causing a coherence error in diesel, see #131969. We still bail with overflow + // when later returning from the parent AliasRelate goal. + CurrentGoalKind::NormalizesTo => {} + CurrentGoalKind::Misc | CurrentGoalKind::CoinductiveTrait => { + let num_non_region_vars = canonical + .variables + .iter() + .filter(|c| !c.is_region() && c.is_existential()) + .count(); + if num_non_region_vars > self.cx().recursion_limit() { + debug!(?num_non_region_vars, "too many inference variables -> overflow"); + return Ok(self.make_ambiguous_response_no_constraints( + MaybeCause::Overflow { + suggest_increasing_limit: true, + keep_constraints: false, + }, + OpaqueTypesJank::AllGood, + )); + } + } + } + + Ok(canonical) + } + + /// Constructs a totally unconstrained, ambiguous response to a goal. + /// + /// Take care when using this, since often it's useful to respond with + /// ambiguity but return constrained variables to guide inference. + pub(in crate::solve) fn make_ambiguous_response_no_constraints( + &self, + cause: MaybeCause, + opaque_types_jank: OpaqueTypesJank, + ) -> CanonicalResponse { + response_no_constraints_raw( + self.cx(), + self.max_input_universe, + self.variables, + Certainty::Maybe { cause, opaque_types_jank }, + ) + } + + /// Computes the region constraints and *new* opaque types registered when + /// proving a goal. + /// + /// If an opaque was already constrained before proving this goal, then the + /// external constraints do not need to record that opaque, since if it is + /// further constrained by inference, that will be passed back in the var + /// values. + #[instrument(level = "trace", skip(self), ret)] + fn compute_external_query_constraints( + &self, + certainty: Certainty, + normalization_nested_goals: NestedNormalizationGoals, + ) -> ExternalConstraintsData { + // We only return region constraints once the certainty is `Yes`. This + // is necessary as we may drop nested goals on ambiguity, which may result + // in unconstrained inference variables in the region constraints. It also + // prevents us from emitting duplicate region constraints, avoiding some + // unnecessary work. 
This slightly weakens the leak check in case it uses + // region constraints from an ambiguous nested goal. This is tested in both + // `tests/ui/higher-ranked/leak-check/leak-check-in-selection-5-ambig.rs` and + // `tests/ui/higher-ranked/leak-check/leak-check-in-selection-6-ambig-unify.rs`. + let region_constraints = if certainty == Certainty::Yes { + self.delegate.make_deduplicated_outlives_constraints() + } else { + Default::default() + }; + + // We only return *newly defined* opaque types from canonical queries. + // + // Constraints for any existing opaque types are already tracked by changes + // to the `var_values`. + let opaque_types = self + .delegate + .clone_opaque_types_added_since(self.initial_opaque_types_storage_num_entries); + + ExternalConstraintsData { region_constraints, opaque_types, normalization_nested_goals } + } } /// Eagerly replace aliases with inference variables, emitting `AliasRelate` @@ -1363,7 +1557,7 @@ pub(super) fn evaluate_root_goal_for_proof_tree, let opaque_types = delegate.clone_opaque_types_lookup_table(); let (goal, opaque_types) = eager_resolve_vars(delegate, (goal, opaque_types)); - let (orig_values, canonical_goal) = EvalCtxt::canonicalize_goal(delegate, goal, opaque_types); + let (orig_values, canonical_goal) = canonicalize_goal(delegate, goal, opaque_types); let (canonical_result, final_revision) = delegate.cx().evaluate_root_goal_for_proof_tree_raw(canonical_goal); @@ -1380,7 +1574,7 @@ pub(super) fn evaluate_root_goal_for_proof_tree, Ok(response) => response, }; - let (normalization_nested_goals, _certainty) = EvalCtxt::instantiate_and_apply_query_response( + let (normalization_nested_goals, _certainty) = instantiate_and_apply_query_response( delegate, goal.param_env, &proof_tree.orig_values, diff --git a/compiler/rustc_next_trait_solver/src/solve/inspect/build.rs b/compiler/rustc_next_trait_solver/src/solve/inspect/build.rs index 2675ed0d0da65..4369148baf91d 100644 --- a/compiler/rustc_next_trait_solver/src/solve/inspect/build.rs +++ b/compiler/rustc_next_trait_solver/src/solve/inspect/build.rs @@ -10,8 +10,8 @@ use derive_where::derive_where; use rustc_type_ir::inherent::*; use rustc_type_ir::{self as ty, Interner}; +use crate::canonical; use crate::delegate::SolverDelegate; -use crate::solve::eval_ctxt::canonical; use crate::solve::{Certainty, Goal, GoalSource, QueryResult, inspect}; /// We need to know whether to build a prove tree while evaluating. 
We diff --git a/compiler/rustc_next_trait_solver/src/solve/inspect/mod.rs b/compiler/rustc_next_trait_solver/src/solve/inspect/mod.rs index 0d8c00601269b..65f32f1947fef 100644 --- a/compiler/rustc_next_trait_solver/src/solve/inspect/mod.rs +++ b/compiler/rustc_next_trait_solver/src/solve/inspect/mod.rs @@ -2,5 +2,3 @@ pub use rustc_type_ir::solve::inspect::*; mod build; pub(in crate::solve) use build::*; - -pub use crate::solve::eval_ctxt::canonical::instantiate_canonical_state; diff --git a/compiler/rustc_next_trait_solver/src/solve/mod.rs b/compiler/rustc_next_trait_solver/src/solve/mod.rs index fb900b592d13b..afb86aaf8ab21 100644 --- a/compiler/rustc_next_trait_solver/src/solve/mod.rs +++ b/compiler/rustc_next_trait_solver/src/solve/mod.rs @@ -380,25 +380,6 @@ where } } -fn response_no_constraints_raw( - cx: I, - max_universe: ty::UniverseIndex, - variables: I::CanonicalVarKinds, - certainty: Certainty, -) -> CanonicalResponse { - ty::Canonical { - max_universe, - variables, - value: Response { - var_values: ty::CanonicalVarValues::make_identity(cx, variables), - // FIXME: maybe we should store the "no response" version in cx, like - // we do for cx.types and stuff. - external_constraints: cx.mk_external_constraints(ExternalConstraintsData::default()), - certainty, - }, - } -} - /// The result of evaluating a goal. pub struct GoalEvaluation { /// The goal we've evaluated. This is the input goal, but potentially with its diff --git a/compiler/rustc_next_trait_solver/src/solve/search_graph.rs b/compiler/rustc_next_trait_solver/src/solve/search_graph.rs index 289325d70550c..aa9dfc9a9a265 100644 --- a/compiler/rustc_next_trait_solver/src/solve/search_graph.rs +++ b/compiler/rustc_next_trait_solver/src/solve/search_graph.rs @@ -6,6 +6,7 @@ use rustc_type_ir::search_graph::{self, PathKind}; use rustc_type_ir::solve::{CanonicalInput, Certainty, NoSolution, QueryResult}; use rustc_type_ir::{Interner, TypingMode}; +use crate::canonical::response_no_constraints_raw; use crate::delegate::SolverDelegate; use crate::solve::{ EvalCtxt, FIXPOINT_STEP_LIMIT, has_no_inference_or_external_constraints, inspect, @@ -127,7 +128,7 @@ fn response_no_constraints( input: CanonicalInput, certainty: Certainty, ) -> QueryResult { - Ok(super::response_no_constraints_raw( + Ok(response_no_constraints_raw( cx, input.canonical.max_universe, input.canonical.variables, diff --git a/compiler/rustc_session/src/config.rs b/compiler/rustc_session/src/config.rs index 297df7c2c9765..795cb2b2cfeba 100644 --- a/compiler/rustc_session/src/config.rs +++ b/compiler/rustc_session/src/config.rs @@ -1509,6 +1509,11 @@ impl Options { pub fn get_symbol_mangling_version(&self) -> SymbolManglingVersion { self.cg.symbol_mangling_version.unwrap_or(SymbolManglingVersion::Legacy) } + + #[inline] + pub fn autodiff_enabled(&self) -> bool { + self.unstable_opts.autodiff.contains(&AutoDiff::Enable) + } } impl UnstableOptions { diff --git a/compiler/rustc_session/src/session.rs b/compiler/rustc_session/src/session.rs index 3525c7c1d1a99..d0dd2cdac0c48 100644 --- a/compiler/rustc_session/src/session.rs +++ b/compiler/rustc_session/src/session.rs @@ -600,6 +600,13 @@ impl Session { /// Calculates the flavor of LTO to use for this compilation. pub fn lto(&self) -> config::Lto { + // Autodiff currently requires fat-lto to have access to the llvm-ir of all (indirectly) used functions and types. + // fat-lto is the easiest solution to this requirement, but quite expensive. + // FIXME(autodiff): Make autodiff also work with embed-bc instead of fat-lto. 
+ if self.opts.autodiff_enabled() { + return config::Lto::Fat; + } + // If our target has codegen requirements ignore the command line if self.target.requires_lto { return config::Lto::Fat; diff --git a/compiler/rustc_trait_selection/src/solve/inspect/analyse.rs b/compiler/rustc_trait_selection/src/solve/inspect/analyse.rs index 086a7a44786d6..c010add0fc50f 100644 --- a/compiler/rustc_trait_selection/src/solve/inspect/analyse.rs +++ b/compiler/rustc_trait_selection/src/solve/inspect/analyse.rs @@ -18,9 +18,9 @@ use rustc_middle::traits::ObligationCause; use rustc_middle::traits::solve::{Certainty, Goal, GoalSource, NoSolution, QueryResult}; use rustc_middle::ty::{TyCtxt, VisitorResult, try_visit}; use rustc_middle::{bug, ty}; +use rustc_next_trait_solver::canonical::instantiate_canonical_state; use rustc_next_trait_solver::resolve::eager_resolve_vars; -use rustc_next_trait_solver::solve::inspect::{self, instantiate_canonical_state}; -use rustc_next_trait_solver::solve::{MaybeCause, SolverDelegateEvalExt as _}; +use rustc_next_trait_solver::solve::{MaybeCause, SolverDelegateEvalExt as _, inspect}; use rustc_span::Span; use tracing::instrument; diff --git a/compiler/rustc_windows_rc/src/lib.rs b/compiler/rustc_windows_rc/src/lib.rs index caa5e5ef27656..5e95557501ea2 100644 --- a/compiler/rustc_windows_rc/src/lib.rs +++ b/compiler/rustc_windows_rc/src/lib.rs @@ -35,8 +35,11 @@ pub fn compile_windows_resource_file( resources_dir.push("resources"); fs::create_dir_all(&resources_dir).unwrap(); - let resource_compiler = - find_resource_compiler(&env::var("CARGO_CFG_TARGET_ARCH").unwrap()).expect("found rc.exe"); + let resource_compiler = if let Ok(path) = env::var("RUSTC_WINDOWS_RC") { + path.into() + } else { + find_resource_compiler(&env::var("CARGO_CFG_TARGET_ARCH").unwrap()).expect("found rc.exe") + }; let rc_path = resources_dir.join(file_stem.with_extension("rc")); diff --git a/library/std/src/sys/fs/wasi.rs b/library/std/src/sys/fs/wasi.rs index b65d86de12a3d..0b65b9cb389df 100644 --- a/library/std/src/sys/fs/wasi.rs +++ b/library/std/src/sys/fs/wasi.rs @@ -848,7 +848,14 @@ fn remove_dir_all_recursive(parent: &WasiFd, path: &Path) -> io::Result<()> { // Iterate over all the entries in this directory, and travel recursively if // necessary - for entry in ReadDir::new(fd, dummy_root) { + // + // Note that all directory entries for this directory are read first before + // any removal is done. This works around the fact that the WASIp1 API for + // reading directories is not well-designed for handling mutations between + // invocations of reading a directory. By reading all the entries at once + // this ensures that, at least without concurrent modifications, it should + // be possible to delete everything. + for entry in ReadDir::new(fd, dummy_root).collect::>() { let entry = entry?; let path = crate::str::from_utf8(&entry.name).map_err(|_| { io::const_error!(io::ErrorKind::Uncategorized, "invalid utf-8 file name found") diff --git a/library/stdarch/.cirrus.yml b/library/stdarch/.cirrus.yml deleted file mode 100644 index a0ecc03b953fd..0000000000000 --- a/library/stdarch/.cirrus.yml +++ /dev/null @@ -1,16 +0,0 @@ -task: - name: x86_64-unknown-freebsd - freebsd_instance: - image_family: freebsd-13-4 - env: - # FIXME(freebsd): FreeBSD has a segfault when `RUST_BACKTRACE` is set - # https://github.com/rust-lang/rust/issues/132185 - RUST_BACKTRACE: "0" - setup_script: - - curl https://sh.rustup.rs -sSf --output rustup.sh - - sh rustup.sh --default-toolchain nightly -y - - . 
$HOME/.cargo/env - - rustup default nightly - test_script: - - . $HOME/.cargo/env - - cargo build --all diff --git a/library/stdarch/.github/workflows/main.yml b/library/stdarch/.github/workflows/main.yml index 048ce9864604e..b0d476f0e2ea5 100644 --- a/library/stdarch/.github/workflows/main.yml +++ b/library/stdarch/.github/workflows/main.yml @@ -117,6 +117,8 @@ jobs: os: windows-2025 - tuple: aarch64-pc-windows-msvc os: windows-11-arm + - tuple: arm64ec-pc-windows-msvc + os: windows-11-arm - tuple: x86_64-pc-windows-gnu os: windows-2025 # - tuple: i686-pc-windows-gnu @@ -207,14 +209,6 @@ jobs: rustup update nightly --no-self-update rustup default nightly shell: bash - if: matrix.target.os != 'windows-11-arm' - - name: Install Rust for `windows-11-arm` runners - # The arm runners don't have Rust pre-installed (https://github.com/actions/partner-runner-images/issues/77) - run: | - curl https://sh.rustup.rs | sh -s -- -y --default-toolchain nightly - echo "$HOME/.cargo/bin" >> $GITHUB_PATH - shell: bash - if: matrix.target.os == 'windows-11-arm' - run: rustup target add ${{ matrix.target.tuple }} shell: bash diff --git a/library/stdarch/Cargo.lock b/library/stdarch/Cargo.lock index 9df0791b86523..a10a456acce1d 100644 --- a/library/stdarch/Cargo.lock +++ b/library/stdarch/Cargo.lock @@ -13,9 +13,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.19" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "301af1932e46185686725e0fad2f8f2aa7da69dd70bf6ecc44d6b703844a3933" +checksum = "3ae563653d1938f79b1ab1b5e668c87c76a9930414574a6583a7b7e11a8e6192" dependencies = [ "anstyle", "anstyle-parse", @@ -43,29 +43,29 @@ dependencies = [ [[package]] name = "anstyle-query" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8bdeb6047d8983be085bab0ba1472e6dc604e7041dbf6fcd5e71523014fae9" +checksum = "9e231f6134f61b71076a3eab506c379d4f36122f2af15a9ff04415ea4c3339e2" dependencies = [ - "windows-sys", + "windows-sys 0.60.2", ] [[package]] name = "anstyle-wincon" -version = "3.0.9" +version = "3.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "403f75924867bb1033c59fbf0797484329750cfbe3c4325cd33127941fabc882" +checksum = "3e0633414522a32ffaac8ac6cc8f748e090c5717661fddeea04219e2344f5f2a" dependencies = [ "anstyle", "once_cell_polyfill", - "windows-sys", + "windows-sys 0.60.2", ] [[package]] name = "anyhow" -version = "1.0.98" +version = "1.0.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" +checksum = "b0674a1ddeecb70197781e945de4b3b8ffb61fa939a5597bcf48503737663100" [[package]] name = "assert-instr-macro" @@ -84,30 +84,31 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "bitflags" -version = "2.9.1" +version = "2.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" +checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394" [[package]] name = "cc" -version = "1.2.31" +version = "1.2.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3a42d84bb6b69d3a8b3eaacf0d88f179e1929695e1ad012b6cf64d9caaa5fd2" +checksum = "5252b3d2648e5eedbc1a6f501e3c795e07025c1e93bbf8bbdd6eef7f447a6d54" dependencies = [ + "find-msvc-tools", "shlex", ] [[package]] name = "cfg-if" -version = 
"1.0.1" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" +checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9" [[package]] name = "clap" -version = "4.5.42" +version = "4.5.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed87a9d530bb41a67537289bafcac159cb3ee28460e0a4571123d2a778a6a882" +checksum = "7eac00902d9d136acd712710d71823fb8ac8004ca445a89e73a41d45aa712931" dependencies = [ "clap_builder", "clap_derive", @@ -115,9 +116,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.42" +version = "4.5.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64f4f3f3c77c94aff3c7e9aac9a2ca1974a5adf392a8bb751e827d6d127ab966" +checksum = "2ad9bbf750e73b5884fb8a211a9424a1906c1e156724260fdae972f31d70e1d6" dependencies = [ "anstream", "anstyle", @@ -127,9 +128,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.41" +version = "4.5.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef4f52386a59ca4c860f7393bcf8abd8dfd91ecccc0f774635ff68e92eeef491" +checksum = "bbfd7eae0b0f1a6e63d4b13c9c478de77c2eb546fba158ad50b4203dc24b9f9c" dependencies = [ "heck", "proc-macro2", @@ -258,6 +259,12 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" +[[package]] +name = "find-msvc-tools" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fd99930f64d146689264c637b5af2f0233a933bef0d8570e2526bf9e083192d" + [[package]] name = "fnv" version = "1.0.7" @@ -283,9 +290,9 @@ checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hashbrown" -version = "0.15.4" +version = "0.15.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" [[package]] name = "heck" @@ -323,12 +330,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" +checksum = "f2481980430f9f78649238835720ddccc57e52df14ffce1c6f37391d61b563e9" dependencies = [ "equivalent", - "hashbrown 0.15.4", + "hashbrown 0.15.5", ] [[package]] @@ -353,7 +360,7 @@ checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" dependencies = [ "hermit-abi", "libc", - "windows-sys", + "windows-sys 0.59.0", ] [[package]] @@ -379,9 +386,9 @@ checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "libc" -version = "0.2.174" +version = "0.2.175" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" +checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543" [[package]] name = "linked-hash-map" @@ -391,9 +398,9 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "log" -version = "0.4.27" +version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" +checksum = 
"34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" [[package]] name = "memchr" @@ -428,9 +435,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.95" +version = "1.0.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" dependencies = [ "unicode-ident", ] @@ -497,9 +504,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" dependencies = [ "either", "rayon-core", @@ -507,9 +514,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.12.1" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" dependencies = [ "crossbeam-deque", "crossbeam-utils", @@ -517,9 +524,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.11.1" +version = "1.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912" dependencies = [ "aho-corasick", "memchr", @@ -529,9 +536,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +checksum = "6b9458fa0bfeeac22b5ca447c63aaf45f28439a709ccd244698632f9aa6394d6" dependencies = [ "aho-corasick", "memchr", @@ -540,9 +547,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" +checksum = "caf4aa5b0f434c91fe5c7f1ecb6a5ece2130b02ad2a590589dda5146df959001" [[package]] name = "rustc-demangle" @@ -593,9 +600,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.142" +version = "1.0.143" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "030fedb782600dcbd6f02d479bf0d817ac3bb40d644745b769d6a96bc3afc5a7" +checksum = "d401abef1d108fbd9cbaebc3e46611f4b1021f714a0597a71f41ee463f5f4a5a" dependencies = [ "itoa", "memchr", @@ -715,9 +722,9 @@ checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "syn" -version = "2.0.104" +version = "2.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" +checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" dependencies = [ "proc-macro2", "quote", @@ -774,7 +781,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "161296c618fa2d63f6ed5fffd1112937e803cb9ec71b32b01a76321555660917" dependencies = [ "bitflags", - "indexmap 2.10.0", + "indexmap 2.11.0", "semver", ] @@ -791,20 +798,35 @@ dependencies = [ [[package]] name = "winapi-util" -version = "0.1.9" +version = "0.1.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +checksum = "0978bf7171b3d90bac376700cb56d606feb40f251a475a5d6634613564460b22" dependencies = [ - "windows-sys", + "windows-sys 0.60.2", ] +[[package]] +name = "windows-link" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" + [[package]] name = "windows-sys" version = "0.59.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" dependencies = [ - "windows-targets", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.3", ] [[package]] @@ -813,14 +835,31 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_gnullvm", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91" +dependencies = [ + "windows-link", + "windows_aarch64_gnullvm 0.53.0", + "windows_aarch64_msvc 0.53.0", + "windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 0.53.0", + "windows_x86_64_msvc 0.53.0", ] [[package]] @@ -829,48 +868,96 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" + [[package]] name = "windows_aarch64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" + [[package]] name = "windows_i686_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" +[[package]] +name = "windows_i686_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" + [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" + [[package]] name = "windows_i686_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +[[package]] +name = "windows_i686_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" + [[package]] name = "windows_x86_64_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" + [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" + [[package]] name = "windows_x86_64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" + [[package]] name = "yaml-rust" version = "0.4.5" @@ -882,18 +969,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.26" +version = "0.8.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" +checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.26" +version = "0.8.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" +checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" dependencies = [ "proc-macro2", "quote", diff --git a/library/stdarch/ci/docker/loongarch64-unknown-linux-gnu/Dockerfile b/library/stdarch/ci/docker/loongarch64-unknown-linux-gnu/Dockerfile index b5c6874ca52ed..e803dae1006a0 100644 --- a/library/stdarch/ci/docker/loongarch64-unknown-linux-gnu/Dockerfile +++ b/library/stdarch/ci/docker/loongarch64-unknown-linux-gnu/Dockerfile @@ -2,11 +2,11 @@ FROM ubuntu:25.10 RUN apt-get update && \ apt-get install -y --no-install-recommends \ - gcc libc6-dev qemu-user-static ca-certificates \ + gcc libc6-dev qemu-user ca-certificates \ gcc-loongarch64-linux-gnu libc6-dev-loong64-cross ENV CARGO_TARGET_LOONGARCH64_UNKNOWN_LINUX_GNU_LINKER=loongarch64-linux-gnu-gcc \ - CARGO_TARGET_LOONGARCH64_UNKNOWN_LINUX_GNU_RUNNER="qemu-loongarch64-static -cpu max -L /usr/loongarch64-linux-gnu" \ + CARGO_TARGET_LOONGARCH64_UNKNOWN_LINUX_GNU_RUNNER="qemu-loongarch64 -cpu 
max -L /usr/loongarch64-linux-gnu" \ OBJDUMP=loongarch64-linux-gnu-objdump \ STDARCH_TEST_SKIP_FEATURE=frecipe diff --git a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs index bc4c438038dc5..855261aaecfd0 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs @@ -188,6 +188,7 @@ pub fn vabds_f32(a: f32, b: f32) -> f32 { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fabd))] pub fn vabdh_f16(a: f16, b: f16) -> f16 { unsafe { simd_extract!(vabd_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) } @@ -947,6 +948,7 @@ pub fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fcadd))] pub fn vcadd_rot270_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe extern "unadjusted" { @@ -964,6 +966,7 @@ pub fn vcadd_rot270_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fcadd))] pub fn vcaddq_rot270_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe extern "unadjusted" { @@ -1029,6 +1032,7 @@ pub fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fcadd))] pub fn vcadd_rot90_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe extern "unadjusted" { @@ -1046,6 +1050,7 @@ pub fn vcadd_rot90_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fcadd))] pub fn vcaddq_rot90_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe extern "unadjusted" { @@ -1175,6 +1180,7 @@ pub fn vcages_f32(a: f32, b: f32) -> u32 { #[cfg_attr(test, assert_instr(facge))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcageh_f16(a: f16, b: f16) -> u16 { unsafe extern "unadjusted" { #[cfg_attr( @@ -1255,6 +1261,7 @@ pub fn vcagts_f32(a: f32, b: f32) -> u32 { #[cfg_attr(test, assert_instr(facgt))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcagth_f16(a: f16, b: f16) -> u16 { unsafe extern "unadjusted" { #[cfg_attr( @@ -1307,6 +1314,7 @@ pub fn vcales_f32(a: f32, b: f32) -> u32 { #[cfg_attr(test, assert_instr(facge))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcaleh_f16(a: f16, b: f16) -> u16 { vcageh_f16(b, a) } @@ -1352,6 +1360,7 @@ pub 
fn vcalts_f32(a: f32, b: f32) -> u32 { #[cfg_attr(test, assert_instr(facgt))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcalth_f16(a: f16, b: f16) -> u16 { vcagth_f16(b, a) } @@ -1469,6 +1478,7 @@ pub fn vceqd_u64(a: u64, b: u64) -> u64 { #[cfg_attr(test, assert_instr(fcmp))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vceqh_f16(a: f16, b: f16) -> u16 { unsafe { simd_extract!(vceq_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) } } @@ -1478,6 +1488,7 @@ pub fn vceqh_f16(a: f16, b: f16) -> u16 { #[cfg_attr(test, assert_instr(fcmeq))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vceqz_f16(a: float16x4_t) -> uint16x4_t { let b: f16x4 = f16x4::new(0.0, 0.0, 0.0, 0.0); unsafe { simd_eq(a, transmute(b)) } @@ -1488,6 +1499,7 @@ pub fn vceqz_f16(a: float16x4_t) -> uint16x4_t { #[cfg_attr(test, assert_instr(fcmeq))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vceqzq_f16(a: float16x8_t) -> uint16x8_t { let b: f16x8 = f16x8::new(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0); unsafe { simd_eq(a, transmute(b)) } @@ -1756,6 +1768,7 @@ pub fn vceqzd_u64(a: u64) -> u64 { #[cfg_attr(test, assert_instr(fcmp))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vceqzh_f16(a: f16) -> u16 { unsafe { simd_extract!(vceqz_f16(vdup_n_f16(a)), 0) } } @@ -1873,6 +1886,7 @@ pub fn vcged_u64(a: u64, b: u64) -> u64 { #[cfg_attr(test, assert_instr(fcmp))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcgeh_f16(a: f16, b: f16) -> u16 { unsafe { simd_extract!(vcge_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) } } @@ -2029,6 +2043,7 @@ pub fn vcgezd_s64(a: i64) -> u64 { #[cfg_attr(test, assert_instr(fcmp))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcgezh_f16(a: f16) -> u16 { unsafe { simd_extract!(vcgez_f16(vdup_n_f16(a)), 0) } } @@ -2128,6 +2143,7 @@ pub fn vcgtd_u64(a: u64, b: u64) -> u64 { #[cfg_attr(test, assert_instr(fcmp))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcgth_f16(a: f16, b: f16) -> u16 { unsafe { simd_extract!(vcgt_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) } } @@ -2284,6 +2300,7 @@ pub fn vcgtzd_s64(a: i64) -> u64 { #[cfg_attr(test, assert_instr(fcmp))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcgtzh_f16(a: f16) -> u16 { unsafe { simd_extract!(vcgtz_f16(vdup_n_f16(a)), 0) } } @@ -2383,6 +2400,7 @@ pub fn vcled_s64(a: i64, b: i64) -> u64 { #[cfg_attr(test, assert_instr(fcmp))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcleh_f16(a: f16, b: f16) -> u16 { unsafe { simd_extract!(vcle_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) } } @@ -2539,6 +2557,7 @@ pub fn vclezd_s64(a: i64) -> u64 { #[cfg_attr(test, assert_instr(fcmp))] #[target_feature(enable = 
"neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vclezh_f16(a: f16) -> u16 { unsafe { simd_extract!(vclez_f16(vdup_n_f16(a)), 0) } } @@ -2620,6 +2639,7 @@ pub fn vcltd_s64(a: i64, b: i64) -> u64 { #[cfg_attr(test, assert_instr(fcmp))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vclth_f16(a: f16, b: f16) -> u16 { unsafe { simd_extract!(vclt_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) } } @@ -2794,6 +2814,7 @@ pub fn vcltzd_s64(a: i64) -> u64 { #[cfg_attr(test, assert_instr(fcmp))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcltzh_f16(a: f16) -> u16 { unsafe { simd_extract!(vcltz_f16(vdup_n_f16(a)), 0) } } @@ -2803,6 +2824,7 @@ pub fn vcltzh_f16(a: f16) -> u16 { #[target_feature(enable = "neon,fcma")] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fcmla))] pub fn vcmla_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t { unsafe extern "unadjusted" { @@ -2820,6 +2842,7 @@ pub fn vcmla_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t #[target_feature(enable = "neon,fcma")] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fcmla))] pub fn vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t { unsafe extern "unadjusted" { @@ -2887,6 +2910,7 @@ pub fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcmla_lane_f16( a: float16x4_t, b: float16x4_t, @@ -2915,6 +2939,7 @@ pub fn vcmla_lane_f16( #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcmlaq_lane_f16( a: float16x8_t, b: float16x8_t, @@ -2992,6 +3017,7 @@ pub fn vcmlaq_lane_f32( #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcmla_laneq_f16( a: float16x4_t, b: float16x4_t, @@ -3020,6 +3046,7 @@ pub fn vcmla_laneq_f16( #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcmlaq_laneq_f16( a: float16x8_t, b: float16x8_t, @@ -3095,6 +3122,7 @@ pub fn vcmlaq_laneq_f32( #[target_feature(enable = "neon,fcma")] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fcmla))] pub fn vcmla_rot180_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t { unsafe extern "unadjusted" { @@ -3112,6 +3140,7 @@ pub fn vcmla_rot180_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float #[target_feature(enable = "neon,fcma")] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fcmla))] pub 
fn vcmlaq_rot180_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t { unsafe extern "unadjusted" { @@ -3179,6 +3208,7 @@ pub fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> floa #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcmla_rot180_lane_f16( a: float16x4_t, b: float16x4_t, @@ -3207,6 +3237,7 @@ pub fn vcmla_rot180_lane_f16( #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcmlaq_rot180_lane_f16( a: float16x8_t, b: float16x8_t, @@ -3284,6 +3315,7 @@ pub fn vcmlaq_rot180_lane_f32( #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcmla_rot180_laneq_f16( a: float16x4_t, b: float16x4_t, @@ -3312,6 +3344,7 @@ pub fn vcmla_rot180_laneq_f16( #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcmlaq_rot180_laneq_f16( a: float16x8_t, b: float16x8_t, @@ -3387,6 +3420,7 @@ pub fn vcmlaq_rot180_laneq_f32( #[target_feature(enable = "neon,fcma")] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fcmla))] pub fn vcmla_rot270_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t { unsafe extern "unadjusted" { @@ -3404,6 +3438,7 @@ pub fn vcmla_rot270_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float #[target_feature(enable = "neon,fcma")] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fcmla))] pub fn vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t { unsafe extern "unadjusted" { @@ -3471,6 +3506,7 @@ pub fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> floa #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcmla_rot270_lane_f16( a: float16x4_t, b: float16x4_t, @@ -3499,6 +3535,7 @@ pub fn vcmla_rot270_lane_f16( #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcmlaq_rot270_lane_f16( a: float16x8_t, b: float16x8_t, @@ -3576,6 +3613,7 @@ pub fn vcmlaq_rot270_lane_f32( #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcmla_rot270_laneq_f16( a: float16x4_t, b: float16x4_t, @@ -3604,6 +3642,7 @@ pub fn vcmla_rot270_laneq_f16( #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcmlaq_rot270_laneq_f16( a: float16x8_t, b: float16x8_t, @@ -3679,6 +3718,7 @@ pub fn vcmlaq_rot270_laneq_f32( #[target_feature(enable = "neon,fcma")] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] 
+#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fcmla))] pub fn vcmla_rot90_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t { unsafe extern "unadjusted" { @@ -3696,6 +3736,7 @@ pub fn vcmla_rot90_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float1 #[target_feature(enable = "neon,fcma")] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fcmla))] pub fn vcmlaq_rot90_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t { unsafe extern "unadjusted" { @@ -3763,6 +3804,7 @@ pub fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcmla_rot90_lane_f16( a: float16x4_t, b: float16x4_t, @@ -3791,6 +3833,7 @@ pub fn vcmla_rot90_lane_f16( #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcmlaq_rot90_lane_f16( a: float16x8_t, b: float16x8_t, @@ -3868,6 +3911,7 @@ pub fn vcmlaq_rot90_lane_f32( #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcmla_rot90_laneq_f16( a: float16x4_t, b: float16x4_t, @@ -3896,6 +3940,7 @@ pub fn vcmla_rot90_laneq_f16( #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcmlaq_rot90_laneq_f16( a: float16x8_t, b: float16x8_t, @@ -7161,6 +7206,7 @@ pub fn vcvtq_f64_u64(a: uint64x2_t) -> float64x2_t { #[cfg_attr(test, assert_instr(fcvtn2))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvt_high_f16_f32(a: float16x4_t, b: float32x4_t) -> float16x8_t { vcombine_f16(a, vcvt_f16_f32(b)) } @@ -7170,6 +7216,7 @@ pub fn vcvt_high_f16_f32(a: float16x4_t, b: float32x4_t) -> float16x8_t { #[cfg_attr(test, assert_instr(fcvtl2))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvt_high_f32_f16(a: float16x8_t) -> float32x4_t { vcvt_f32_f16(vget_high_f16(a)) } @@ -7408,6 +7455,7 @@ pub fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(fcvtas))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvta_s16_f16(a: float16x4_t) -> int16x4_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -7424,6 +7472,7 @@ pub fn vcvta_s16_f16(a: float16x4_t) -> int16x4_t { #[cfg_attr(test, assert_instr(fcvtas))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtaq_s16_f16(a: float16x8_t) -> int16x8_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -7504,6 +7553,7 @@ pub fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t { #[cfg_attr(test, assert_instr(fcvtau))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvta_u16_f16(a: float16x4_t) -> uint16x4_t { 
unsafe extern "unadjusted" { #[cfg_attr( @@ -7520,6 +7570,7 @@ pub fn vcvta_u16_f16(a: float16x4_t) -> uint16x4_t { #[cfg_attr(test, assert_instr(fcvtau))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtaq_u16_f16(a: float16x8_t) -> uint16x8_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -7600,6 +7651,7 @@ pub fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(fcvtas))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtah_s16_f16(a: f16) -> i16 { vcvtah_s32_f16(a) as i16 } @@ -7609,6 +7661,7 @@ pub fn vcvtah_s16_f16(a: f16) -> i16 { #[cfg_attr(test, assert_instr(fcvtas))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtah_s32_f16(a: f16) -> i32 { unsafe extern "unadjusted" { #[cfg_attr( @@ -7625,6 +7678,7 @@ pub fn vcvtah_s32_f16(a: f16) -> i32 { #[cfg_attr(test, assert_instr(fcvtas))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtah_s64_f16(a: f16) -> i64 { unsafe extern "unadjusted" { #[cfg_attr( @@ -7641,6 +7695,7 @@ pub fn vcvtah_s64_f16(a: f16) -> i64 { #[cfg_attr(test, assert_instr(fcvtau))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtah_u16_f16(a: f16) -> u16 { vcvtah_u32_f16(a) as u16 } @@ -7650,6 +7705,7 @@ pub fn vcvtah_u16_f16(a: f16) -> u16 { #[cfg_attr(test, assert_instr(fcvtau))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtah_u32_f16(a: f16) -> u32 { unsafe extern "unadjusted" { #[cfg_attr( @@ -7666,6 +7722,7 @@ pub fn vcvtah_u32_f16(a: f16) -> u32 { #[cfg_attr(test, assert_instr(fcvtau))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtah_u64_f16(a: f16) -> u64 { unsafe extern "unadjusted" { #[cfg_attr( @@ -7764,6 +7821,7 @@ pub fn vcvts_f32_s32(a: i32) -> f32 { #[cfg_attr(test, assert_instr(scvtf))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvth_f16_s16(a: i16) -> f16 { a as f16 } @@ -7773,6 +7831,7 @@ pub fn vcvth_f16_s16(a: i16) -> f16 { #[cfg_attr(test, assert_instr(scvtf))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvth_f16_s32(a: i32) -> f16 { a as f16 } @@ -7782,6 +7841,7 @@ pub fn vcvth_f16_s32(a: i32) -> f16 { #[cfg_attr(test, assert_instr(scvtf))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvth_f16_s64(a: i64) -> f16 { a as f16 } @@ -7791,6 +7851,7 @@ pub fn vcvth_f16_s64(a: i64) -> f16 { #[cfg_attr(test, assert_instr(ucvtf))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvth_f16_u16(a: u16) -> f16 { a as f16 } @@ -7800,6 +7861,7 @@ pub fn vcvth_f16_u16(a: u16) -> f16 { #[cfg_attr(test, assert_instr(ucvtf))] 
#[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvth_f16_u32(a: u32) -> f16 { a as f16 } @@ -7809,6 +7871,7 @@ pub fn vcvth_f16_u32(a: u32) -> f16 { #[cfg_attr(test, assert_instr(ucvtf))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvth_f16_u64(a: u64) -> f16 { a as f16 } @@ -7819,6 +7882,7 @@ pub fn vcvth_f16_u64(a: u64) -> f16 { #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvth_n_f16_s16(a: i16) -> f16 { static_assert!(N >= 1 && N <= 16); vcvth_n_f16_s32::(a as i32) @@ -7830,6 +7894,7 @@ pub fn vcvth_n_f16_s16(a: i16) -> f16 { #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvth_n_f16_s32(a: i32) -> f16 { static_assert!(N >= 1 && N <= 16); unsafe extern "unadjusted" { @@ -7848,6 +7913,7 @@ pub fn vcvth_n_f16_s32(a: i32) -> f16 { #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvth_n_f16_s64(a: i64) -> f16 { static_assert!(N >= 1 && N <= 16); unsafe extern "unadjusted" { @@ -7866,6 +7932,7 @@ pub fn vcvth_n_f16_s64(a: i64) -> f16 { #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvth_n_f16_u16(a: u16) -> f16 { static_assert!(N >= 1 && N <= 16); vcvth_n_f16_u32::(a as u32) @@ -7877,6 +7944,7 @@ pub fn vcvth_n_f16_u16(a: u16) -> f16 { #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvth_n_f16_u32(a: u32) -> f16 { static_assert!(N >= 1 && N <= 16); unsafe extern "unadjusted" { @@ -7895,6 +7963,7 @@ pub fn vcvth_n_f16_u32(a: u32) -> f16 { #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvth_n_f16_u64(a: u64) -> f16 { static_assert!(N >= 1 && N <= 16); unsafe extern "unadjusted" { @@ -7913,6 +7982,7 @@ pub fn vcvth_n_f16_u64(a: u64) -> f16 { #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvth_n_s16_f16(a: f16) -> i16 { static_assert!(N >= 1 && N <= 16); vcvth_n_s32_f16::(a) as i16 @@ -7924,6 +7994,7 @@ pub fn vcvth_n_s16_f16(a: f16) -> i16 { #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvth_n_s32_f16(a: f16) -> i32 { static_assert!(N >= 1 && N <= 16); unsafe extern "unadjusted" { @@ -7942,6 +8013,7 @@ pub fn vcvth_n_s32_f16(a: f16) -> i32 { #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvth_n_s64_f16(a: f16) -> i64 { static_assert!(N >= 1 && N <= 16); unsafe extern "unadjusted" { @@ -7960,6 +8032,7 @@ pub fn 
vcvth_n_s64_f16(a: f16) -> i64 { #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvth_n_u16_f16(a: f16) -> u16 { static_assert!(N >= 1 && N <= 16); vcvth_n_u32_f16::(a) as u16 @@ -7971,6 +8044,7 @@ pub fn vcvth_n_u16_f16(a: f16) -> u16 { #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvth_n_u32_f16(a: f16) -> u32 { static_assert!(N >= 1 && N <= 16); unsafe extern "unadjusted" { @@ -7989,6 +8063,7 @@ pub fn vcvth_n_u32_f16(a: f16) -> u32 { #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvth_n_u64_f16(a: f16) -> u64 { static_assert!(N >= 1 && N <= 16); unsafe extern "unadjusted" { @@ -8006,6 +8081,7 @@ pub fn vcvth_n_u64_f16(a: f16) -> u64 { #[cfg_attr(test, assert_instr(fcvtzs))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvth_s16_f16(a: f16) -> i16 { a as i16 } @@ -8015,6 +8091,7 @@ pub fn vcvth_s16_f16(a: f16) -> i16 { #[cfg_attr(test, assert_instr(fcvtzs))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvth_s32_f16(a: f16) -> i32 { a as i32 } @@ -8024,6 +8101,7 @@ pub fn vcvth_s32_f16(a: f16) -> i32 { #[cfg_attr(test, assert_instr(fcvtzs))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvth_s64_f16(a: f16) -> i64 { a as i64 } @@ -8033,6 +8111,7 @@ pub fn vcvth_s64_f16(a: f16) -> i64 { #[cfg_attr(test, assert_instr(fcvtzu))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvth_u16_f16(a: f16) -> u16 { a as u16 } @@ -8042,6 +8121,7 @@ pub fn vcvth_u16_f16(a: f16) -> u16 { #[cfg_attr(test, assert_instr(fcvtzu))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvth_u32_f16(a: f16) -> u32 { a as u32 } @@ -8051,6 +8131,7 @@ pub fn vcvth_u32_f16(a: f16) -> u32 { #[cfg_attr(test, assert_instr(fcvtzu))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvth_u64_f16(a: f16) -> u64 { a as u64 } @@ -8060,6 +8141,7 @@ pub fn vcvth_u64_f16(a: f16) -> u64 { #[cfg_attr(test, assert_instr(fcvtms))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtm_s16_f16(a: float16x4_t) -> int16x4_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -8076,6 +8158,7 @@ pub fn vcvtm_s16_f16(a: float16x4_t) -> int16x4_t { #[cfg_attr(test, assert_instr(fcvtms))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtmq_s16_f16(a: float16x8_t) -> int16x8_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -8156,6 +8239,7 @@ pub fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t { #[cfg_attr(test, assert_instr(fcvtmu))] #[target_feature(enable = "neon,fp16")] 
#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtm_u16_f16(a: float16x4_t) -> uint16x4_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -8172,6 +8256,7 @@ pub fn vcvtm_u16_f16(a: float16x4_t) -> uint16x4_t { #[cfg_attr(test, assert_instr(fcvtmu))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtmq_u16_f16(a: float16x8_t) -> uint16x8_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -8252,6 +8337,7 @@ pub fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(fcvtms))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtmh_s16_f16(a: f16) -> i16 { vcvtmh_s32_f16(a) as i16 } @@ -8261,6 +8347,7 @@ pub fn vcvtmh_s16_f16(a: f16) -> i16 { #[cfg_attr(test, assert_instr(fcvtms))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtmh_s32_f16(a: f16) -> i32 { unsafe extern "unadjusted" { #[cfg_attr( @@ -8277,6 +8364,7 @@ pub fn vcvtmh_s32_f16(a: f16) -> i32 { #[cfg_attr(test, assert_instr(fcvtms))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtmh_s64_f16(a: f16) -> i64 { unsafe extern "unadjusted" { #[cfg_attr( @@ -8293,6 +8381,7 @@ pub fn vcvtmh_s64_f16(a: f16) -> i64 { #[cfg_attr(test, assert_instr(fcvtmu))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtmh_u16_f16(a: f16) -> u16 { vcvtmh_u32_f16(a) as u16 } @@ -8302,6 +8391,7 @@ pub fn vcvtmh_u16_f16(a: f16) -> u16 { #[cfg_attr(test, assert_instr(fcvtmu))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtmh_u32_f16(a: f16) -> u32 { unsafe extern "unadjusted" { #[cfg_attr( @@ -8318,6 +8408,7 @@ pub fn vcvtmh_u32_f16(a: f16) -> u32 { #[cfg_attr(test, assert_instr(fcvtmu))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtmh_u64_f16(a: f16) -> u64 { unsafe extern "unadjusted" { #[cfg_attr( @@ -8398,6 +8489,7 @@ pub fn vcvtmd_u64_f64(a: f64) -> u64 { #[cfg_attr(test, assert_instr(fcvtns))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtn_s16_f16(a: float16x4_t) -> int16x4_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -8414,6 +8506,7 @@ pub fn vcvtn_s16_f16(a: float16x4_t) -> int16x4_t { #[cfg_attr(test, assert_instr(fcvtns))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtnq_s16_f16(a: float16x8_t) -> int16x8_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -8494,6 +8587,7 @@ pub fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t { #[cfg_attr(test, assert_instr(fcvtnu))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtn_u16_f16(a: float16x4_t) -> uint16x4_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -8510,6 +8604,7 @@ pub fn vcvtn_u16_f16(a: float16x4_t) -> uint16x4_t { 
#[cfg_attr(test, assert_instr(fcvtnu))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtnq_u16_f16(a: float16x8_t) -> uint16x8_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -8590,6 +8685,7 @@ pub fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(fcvtns))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtnh_s16_f16(a: f16) -> i16 { vcvtnh_s32_f16(a) as i16 } @@ -8599,6 +8695,7 @@ pub fn vcvtnh_s16_f16(a: f16) -> i16 { #[cfg_attr(test, assert_instr(fcvtns))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtnh_s32_f16(a: f16) -> i32 { unsafe extern "unadjusted" { #[cfg_attr( @@ -8615,6 +8712,7 @@ pub fn vcvtnh_s32_f16(a: f16) -> i32 { #[cfg_attr(test, assert_instr(fcvtns))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtnh_s64_f16(a: f16) -> i64 { unsafe extern "unadjusted" { #[cfg_attr( @@ -8631,6 +8729,7 @@ pub fn vcvtnh_s64_f16(a: f16) -> i64 { #[cfg_attr(test, assert_instr(fcvtnu))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtnh_u16_f16(a: f16) -> u16 { vcvtnh_u32_f16(a) as u16 } @@ -8640,6 +8739,7 @@ pub fn vcvtnh_u16_f16(a: f16) -> u16 { #[cfg_attr(test, assert_instr(fcvtnu))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtnh_u32_f16(a: f16) -> u32 { unsafe extern "unadjusted" { #[cfg_attr( @@ -8656,6 +8756,7 @@ pub fn vcvtnh_u32_f16(a: f16) -> u32 { #[cfg_attr(test, assert_instr(fcvtnu))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtnh_u64_f16(a: f16) -> u64 { unsafe extern "unadjusted" { #[cfg_attr( @@ -8736,6 +8837,7 @@ pub fn vcvtnd_u64_f64(a: f64) -> u64 { #[cfg_attr(test, assert_instr(fcvtps))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtp_s16_f16(a: float16x4_t) -> int16x4_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -8752,6 +8854,7 @@ pub fn vcvtp_s16_f16(a: float16x4_t) -> int16x4_t { #[cfg_attr(test, assert_instr(fcvtps))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtpq_s16_f16(a: float16x8_t) -> int16x8_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -8832,6 +8935,7 @@ pub fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t { #[cfg_attr(test, assert_instr(fcvtpu))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtp_u16_f16(a: float16x4_t) -> uint16x4_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -8848,6 +8952,7 @@ pub fn vcvtp_u16_f16(a: float16x4_t) -> uint16x4_t { #[cfg_attr(test, assert_instr(fcvtpu))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtpq_u16_f16(a: float16x8_t) -> uint16x8_t { unsafe extern "unadjusted" { #[cfg_attr( @@ 
-8928,6 +9033,7 @@ pub fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t { #[cfg_attr(test, assert_instr(fcvtps))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtph_s16_f16(a: f16) -> i16 { vcvtph_s32_f16(a) as i16 } @@ -8937,6 +9043,7 @@ pub fn vcvtph_s16_f16(a: f16) -> i16 { #[cfg_attr(test, assert_instr(fcvtps))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtph_s32_f16(a: f16) -> i32 { unsafe extern "unadjusted" { #[cfg_attr( @@ -8953,6 +9060,7 @@ pub fn vcvtph_s32_f16(a: f16) -> i32 { #[cfg_attr(test, assert_instr(fcvtps))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtph_s64_f16(a: f16) -> i64 { unsafe extern "unadjusted" { #[cfg_attr( @@ -8969,6 +9077,7 @@ pub fn vcvtph_s64_f16(a: f16) -> i64 { #[cfg_attr(test, assert_instr(fcvtpu))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtph_u16_f16(a: f16) -> u16 { vcvtph_u32_f16(a) as u16 } @@ -8978,6 +9087,7 @@ pub fn vcvtph_u16_f16(a: f16) -> u16 { #[cfg_attr(test, assert_instr(fcvtpu))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtph_u32_f16(a: f16) -> u32 { unsafe extern "unadjusted" { #[cfg_attr( @@ -8994,6 +9104,7 @@ pub fn vcvtph_u32_f16(a: f16) -> u32 { #[cfg_attr(test, assert_instr(fcvtpu))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtph_u64_f16(a: f16) -> u64 { unsafe extern "unadjusted" { #[cfg_attr( @@ -9305,6 +9416,7 @@ pub fn vcvtxd_f32_f64(a: f64) -> f32 { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fdiv))] pub fn vdiv_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe { simd_div(a, b) } @@ -9314,6 +9426,7 @@ pub fn vdiv_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fdiv))] pub fn vdivq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe { simd_div(a, b) } @@ -9359,6 +9472,7 @@ pub fn vdivq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(nop))] pub fn vdivh_f16(a: f16, b: f16) -> f16 { a / b @@ -9608,6 +9722,7 @@ pub fn vdupd_lane_u64(a: uint64x1_t) -> u64 { #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vduph_lane_f16(a: float16x4_t) -> f16 { static_assert_uimm_bits!(N, 2); unsafe { simd_extract!(a, N as u32) } @@ -9619,6 +9734,7 @@ pub fn vduph_lane_f16(a: float16x4_t) -> f16 { #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vduph_laneq_f16(a: 
float16x8_t) -> f16 { static_assert_uimm_bits!(N, 4); unsafe { simd_extract!(a, N as u32) } @@ -9977,6 +10093,7 @@ pub fn vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfma_lane_f16( a: float16x4_t, b: float16x4_t, @@ -9992,6 +10109,7 @@ pub fn vfma_lane_f16( #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfma_laneq_f16( a: float16x4_t, b: float16x4_t, @@ -10007,6 +10125,7 @@ pub fn vfma_laneq_f16( #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfmaq_lane_f16( a: float16x8_t, b: float16x8_t, @@ -10022,6 +10141,7 @@ pub fn vfmaq_lane_f16( #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfmaq_laneq_f16( a: float16x8_t, b: float16x8_t, @@ -10140,6 +10260,7 @@ pub fn vfma_laneq_f64( #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fmla))] pub fn vfma_n_f16(a: float16x4_t, b: float16x4_t, c: f16) -> float16x4_t { vfma_f16(a, b, vdup_n_f16(c)) @@ -10149,6 +10270,7 @@ pub fn vfma_n_f16(a: float16x4_t, b: float16x4_t, c: f16) -> float16x4_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fmla))] pub fn vfmaq_n_f16(a: float16x8_t, b: float16x8_t, c: f16) -> float16x8_t { vfmaq_f16(a, b, vdupq_n_f16(c)) @@ -10182,6 +10304,7 @@ pub fn vfmad_lane_f64(a: f64, b: f64, c: float64x1_t) -> f64 { #[cfg_attr(test, assert_instr(fmadd))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfmah_f16(a: f16, b: f16, c: f16) -> f16 { unsafe { fmaf16(b, c, a) } } @@ -10192,6 +10315,7 @@ pub fn vfmah_f16(a: f16, b: f16, c: f16) -> f16 { #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfmah_lane_f16(a: f16, b: f16, v: float16x4_t) -> f16 { static_assert_uimm_bits!(LANE, 2); unsafe { @@ -10206,6 +10330,7 @@ pub fn vfmah_lane_f16(a: f16, b: f16, v: float16x4_t) -> f16 { #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfmah_laneq_f16(a: f16, b: f16, v: float16x8_t) -> f16 { static_assert_uimm_bits!(LANE, 3); unsafe { @@ -10294,6 +10419,7 @@ pub fn vfmad_laneq_f64(a: f64, b: f64, c: float64x2_t) -> f64 { #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fmlal2))] pub fn vfmlal_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t { unsafe extern "unadjusted" { @@ -10311,6 +10437,7 @@ pub fn vfmlal_high_f16(r: float32x2_t, 
a: float16x4_t, b: float16x4_t) -> float3 #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fmlal2))] pub fn vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t { unsafe extern "unadjusted" { @@ -10330,6 +10457,7 @@ pub fn vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] #[rustc_legacy_const_generics(3)] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfmlal_lane_high_f16( r: float32x2_t, a: float16x4_t, @@ -10346,6 +10474,7 @@ pub fn vfmlal_lane_high_f16( #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] #[rustc_legacy_const_generics(3)] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfmlal_laneq_high_f16( r: float32x2_t, a: float16x4_t, @@ -10362,6 +10491,7 @@ pub fn vfmlal_laneq_high_f16( #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] #[rustc_legacy_const_generics(3)] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfmlalq_lane_high_f16( r: float32x4_t, a: float16x8_t, @@ -10378,6 +10508,7 @@ pub fn vfmlalq_lane_high_f16( #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] #[rustc_legacy_const_generics(3)] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfmlalq_laneq_high_f16( r: float32x4_t, a: float16x8_t, @@ -10394,6 +10525,7 @@ pub fn vfmlalq_laneq_high_f16( #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] #[rustc_legacy_const_generics(3)] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfmlal_lane_low_f16( r: float32x2_t, a: float16x4_t, @@ -10410,6 +10542,7 @@ pub fn vfmlal_lane_low_f16( #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] #[rustc_legacy_const_generics(3)] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfmlal_laneq_low_f16( r: float32x2_t, a: float16x4_t, @@ -10426,6 +10559,7 @@ pub fn vfmlal_laneq_low_f16( #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] #[rustc_legacy_const_generics(3)] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfmlalq_lane_low_f16( r: float32x4_t, a: float16x8_t, @@ -10442,6 +10576,7 @@ pub fn vfmlalq_lane_low_f16( #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] #[rustc_legacy_const_generics(3)] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfmlalq_laneq_low_f16( r: float32x4_t, a: float16x8_t, @@ -10456,6 +10591,7 @@ pub fn vfmlalq_laneq_low_f16( #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fmlal))] pub fn vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t { unsafe extern "unadjusted" { @@ -10473,6 +10609,7 @@ pub fn vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32 #[target_feature(enable = 
"neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fmlal))] pub fn vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t { unsafe extern "unadjusted" { @@ -10490,6 +10627,7 @@ pub fn vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float3 #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fmlsl2))] pub fn vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t { unsafe extern "unadjusted" { @@ -10507,6 +10645,7 @@ pub fn vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float3 #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fmlsl2))] pub fn vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t { unsafe extern "unadjusted" { @@ -10526,6 +10665,7 @@ pub fn vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] #[rustc_legacy_const_generics(3)] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfmlsl_lane_high_f16( r: float32x2_t, a: float16x4_t, @@ -10542,6 +10682,7 @@ pub fn vfmlsl_lane_high_f16( #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] #[rustc_legacy_const_generics(3)] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfmlsl_laneq_high_f16( r: float32x2_t, a: float16x4_t, @@ -10558,6 +10699,7 @@ pub fn vfmlsl_laneq_high_f16( #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] #[rustc_legacy_const_generics(3)] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfmlslq_lane_high_f16( r: float32x4_t, a: float16x8_t, @@ -10574,6 +10716,7 @@ pub fn vfmlslq_lane_high_f16( #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] #[rustc_legacy_const_generics(3)] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfmlslq_laneq_high_f16( r: float32x4_t, a: float16x8_t, @@ -10590,6 +10733,7 @@ pub fn vfmlslq_laneq_high_f16( #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] #[rustc_legacy_const_generics(3)] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfmlsl_lane_low_f16( r: float32x2_t, a: float16x4_t, @@ -10606,6 +10750,7 @@ pub fn vfmlsl_lane_low_f16( #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] #[rustc_legacy_const_generics(3)] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfmlsl_laneq_low_f16( r: float32x2_t, a: float16x4_t, @@ -10622,6 +10767,7 @@ pub fn vfmlsl_laneq_low_f16( #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] #[rustc_legacy_const_generics(3)] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfmlslq_lane_low_f16( r: float32x4_t, a: float16x8_t, @@ 
-10638,6 +10784,7 @@ pub fn vfmlslq_lane_low_f16( #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] #[rustc_legacy_const_generics(3)] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfmlslq_laneq_low_f16( r: float32x4_t, a: float16x8_t, @@ -10652,6 +10799,7 @@ pub fn vfmlslq_laneq_low_f16( #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fmlsl))] pub fn vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t { unsafe extern "unadjusted" { @@ -10669,6 +10817,7 @@ pub fn vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32 #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fmlsl))] pub fn vfmlslq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t { unsafe extern "unadjusted" { @@ -10699,6 +10848,7 @@ pub fn vfms_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfms_lane_f16( a: float16x4_t, b: float16x4_t, @@ -10714,6 +10864,7 @@ pub fn vfms_lane_f16( #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfms_laneq_f16( a: float16x4_t, b: float16x4_t, @@ -10729,6 +10880,7 @@ pub fn vfms_laneq_f16( #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfmsq_lane_f16( a: float16x8_t, b: float16x8_t, @@ -10744,6 +10896,7 @@ pub fn vfmsq_lane_f16( #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfmsq_laneq_f16( a: float16x8_t, b: float16x8_t, @@ -10862,6 +11015,7 @@ pub fn vfms_laneq_f64( #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fmls))] pub fn vfms_n_f16(a: float16x4_t, b: float16x4_t, c: f16) -> float16x4_t { vfms_f16(a, b, vdup_n_f16(c)) @@ -10871,6 +11025,7 @@ pub fn vfms_n_f16(a: float16x4_t, b: float16x4_t, c: f16) -> float16x4_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fmls))] pub fn vfmsq_n_f16(a: float16x8_t, b: float16x8_t, c: f16) -> float16x8_t { vfmsq_f16(a, b, vdupq_n_f16(c)) @@ -10890,6 +11045,7 @@ pub fn vfms_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t { #[cfg_attr(test, assert_instr(fmsub))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfmsh_f16(a: f16, b: f16, c: f16) -> f16 { vfmah_f16(a, -b, c) } @@ -10900,6 +11056,7 @@ pub fn vfmsh_f16(a: f16, b: f16, c: f16) -> f16 { #[rustc_legacy_const_generics(3)] 
#[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfmsh_lane_f16(a: f16, b: f16, v: float16x4_t) -> f16 { static_assert_uimm_bits!(LANE, 2); unsafe { @@ -10914,6 +11071,7 @@ pub fn vfmsh_lane_f16(a: f16, b: f16, v: float16x4_t) -> f16 { #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfmsh_laneq_f16(a: f16, b: f16, v: float16x8_t) -> f16 { static_assert_uimm_bits!(LANE, 3); unsafe { @@ -11005,6 +11163,7 @@ pub fn vfmsd_laneq_f64(a: f64, b: f64, c: float64x2_t) -> f64 { #[target_feature(enable = "neon,fp16")] #[cfg_attr(test, assert_instr(ldr))] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld1_f16(ptr: *const f16) -> float16x4_t { crate::ptr::read_unaligned(ptr.cast()) } @@ -11016,6 +11175,7 @@ pub unsafe fn vld1_f16(ptr: *const f16) -> float16x4_t { #[target_feature(enable = "neon,fp16")] #[cfg_attr(test, assert_instr(ldr))] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld1q_f16(ptr: *const f16) -> float16x8_t { crate::ptr::read_unaligned(ptr.cast()) } @@ -13107,6 +13267,7 @@ pub fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fmax))] pub fn vmaxh_f16(a: f16, b: f16) -> f16 { unsafe extern "unadjusted" { @@ -13141,6 +13302,7 @@ pub fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fmaxnm))] pub fn vmaxnmh_f16(a: f16, b: f16) -> f16 { f16::max(a, b) @@ -13150,6 +13312,7 @@ pub fn vmaxnmh_f16(a: f16, b: f16) -> f16 { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fmaxnmv))] pub fn vmaxnmv_f16(a: float16x4_t) -> f16 { unsafe { simd_reduce_max(a) } @@ -13159,6 +13322,7 @@ pub fn vmaxnmv_f16(a: float16x4_t) -> f16 { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fmaxnmv))] pub fn vmaxnmvq_f16(a: float16x8_t) -> f16 { unsafe { simd_reduce_max(a) } @@ -13195,6 +13359,7 @@ pub fn vmaxnmvq_f32(a: float32x4_t) -> f32 { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fmaxv))] pub fn vmaxv_f16(a: float16x4_t) -> f16 { unsafe extern "unadjusted" { @@ -13211,6 +13376,7 @@ pub fn vmaxv_f16(a: float16x4_t) -> f16 { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fmaxv))] pub fn vmaxvq_f16(a: float16x8_t) -> f16 { unsafe extern "unadjusted" { @@ -13415,6 +13581,7 @@ pub fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = 
"arm64ec"))] #[cfg_attr(test, assert_instr(fmin))] pub fn vminh_f16(a: f16, b: f16) -> f16 { unsafe extern "unadjusted" { @@ -13449,6 +13616,7 @@ pub fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fminnm))] pub fn vminnmh_f16(a: f16, b: f16) -> f16 { f16::min(a, b) @@ -13458,6 +13626,7 @@ pub fn vminnmh_f16(a: f16, b: f16) -> f16 { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fminnmv))] pub fn vminnmv_f16(a: float16x4_t) -> f16 { unsafe { simd_reduce_min(a) } @@ -13467,6 +13636,7 @@ pub fn vminnmv_f16(a: float16x4_t) -> f16 { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fminnmv))] pub fn vminnmvq_f16(a: float16x8_t) -> f16 { unsafe { simd_reduce_min(a) } @@ -13503,6 +13673,7 @@ pub fn vminnmvq_f32(a: float32x4_t) -> f32 { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fminv))] pub fn vminv_f16(a: float16x4_t) -> f16 { unsafe extern "unadjusted" { @@ -13519,6 +13690,7 @@ pub fn vminv_f16(a: float16x4_t) -> f16 { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fminv))] pub fn vminvq_f16(a: float16x8_t) -> f16 { unsafe extern "unadjusted" { @@ -14554,6 +14726,7 @@ pub fn vmul_lane_f64(a: float64x1_t, b: float64x1_t) -> float64 #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vmul_laneq_f16(a: float16x4_t, b: float16x8_t) -> float16x4_t { static_assert_uimm_bits!(LANE, 3); unsafe { @@ -14570,6 +14743,7 @@ pub fn vmul_laneq_f16(a: float16x4_t, b: float16x8_t) -> float1 #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vmulq_laneq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { static_assert_uimm_bits!(LANE, 3); unsafe { @@ -14640,6 +14814,7 @@ pub fn vmuld_lane_f64(a: f64, b: float64x1_t) -> f64 { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(nop))] pub fn vmulh_f16(a: f16, b: f16) -> f16 { a * b @@ -14651,6 +14826,7 @@ pub fn vmulh_f16(a: f16, b: f16) -> f16 { #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vmulh_lane_f16(a: f16, b: float16x4_t) -> f16 { static_assert_uimm_bits!(LANE, 2); unsafe { @@ -14665,6 +14841,7 @@ pub fn vmulh_lane_f16(a: f16, b: float16x4_t) -> f16 { #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vmulh_laneq_f16(a: f16, b: float16x8_t) -> f16 { static_assert_uimm_bits!(LANE, 3); unsafe { @@ -15073,6 +15250,7 @@ pub 
fn vmuld_laneq_f64(a: f64, b: float64x2_t) -> f64 { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fmulx))] pub fn vmulx_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe extern "unadjusted" { @@ -15089,6 +15267,7 @@ pub fn vmulx_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fmulx))] pub fn vmulxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe extern "unadjusted" { @@ -15171,6 +15350,7 @@ pub fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vmulx_lane_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { static_assert_uimm_bits!(LANE, 2); unsafe { @@ -15187,6 +15367,7 @@ pub fn vmulx_lane_f16(a: float16x4_t, b: float16x4_t) -> float1 #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vmulx_laneq_f16(a: float16x4_t, b: float16x8_t) -> float16x4_t { static_assert_uimm_bits!(LANE, 3); unsafe { @@ -15203,6 +15384,7 @@ pub fn vmulx_laneq_f16(a: float16x4_t, b: float16x8_t) -> float #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vmulxq_lane_f16(a: float16x8_t, b: float16x4_t) -> float16x8_t { static_assert_uimm_bits!(LANE, 2); unsafe { @@ -15232,6 +15414,7 @@ pub fn vmulxq_lane_f16(a: float16x8_t, b: float16x4_t) -> float #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vmulxq_laneq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { static_assert_uimm_bits!(LANE, 3); unsafe { @@ -15347,6 +15530,7 @@ pub fn vmulx_laneq_f64(a: float64x1_t, b: float64x2_t) -> float #[cfg_attr(test, assert_instr(fmulx))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vmulx_n_f16(a: float16x4_t, b: f16) -> float16x4_t { vmulx_f16(a, vdup_n_f16(b)) } @@ -15356,6 +15540,7 @@ pub fn vmulx_n_f16(a: float16x4_t, b: f16) -> float16x4_t { #[cfg_attr(test, assert_instr(fmulx))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vmulxq_n_f16(a: float16x8_t, b: f16) -> float16x8_t { vmulxq_f16(a, vdupq_n_f16(b)) } @@ -15440,6 +15625,7 @@ pub fn vmulxs_laneq_f32(a: f32, b: float32x4_t) -> f32 { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fmulx))] pub fn vmulxh_f16(a: f16, b: f16) -> f16 { unsafe extern "unadjusted" { @@ -15458,6 +15644,7 @@ pub fn vmulxh_f16(a: f16, b: f16) -> f16 { #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vmulxh_lane_f16(a: f16, b: float16x4_t) -> 
f16 { static_assert_uimm_bits!(LANE, 2); unsafe { vmulxh_f16(a, simd_extract!(b, LANE as u32)) } @@ -15469,6 +15656,7 @@ pub fn vmulxh_lane_f16(a: f16, b: float16x4_t) -> f16 { #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vmulxh_laneq_f16(a: f16, b: float16x8_t) -> f16 { static_assert_uimm_bits!(LANE, 3); unsafe { vmulxh_f16(a, simd_extract!(b, LANE as u32)) } @@ -15534,6 +15722,7 @@ pub fn vnegd_s64(a: i64) -> i64 { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fneg))] pub fn vnegh_f16(a: f16) -> f16 { -a @@ -15587,6 +15776,7 @@ pub fn vpaddd_u64(a: uint64x2_t) -> u64 { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(faddp))] pub fn vpaddq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe extern "unadjusted" { @@ -15805,6 +15995,7 @@ pub fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fmaxp))] pub fn vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe extern "unadjusted" { @@ -15821,6 +16012,7 @@ pub fn vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fmaxp))] pub fn vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe extern "unadjusted" { @@ -15837,6 +16029,7 @@ pub fn vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fmaxnmp))] pub fn vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe extern "unadjusted" { @@ -15853,6 +16046,7 @@ pub fn vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fmaxnmp))] pub fn vpmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe extern "unadjusted" { @@ -16109,6 +16303,7 @@ pub fn vpmaxs_f32(a: float32x2_t) -> f32 { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fminp))] pub fn vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe extern "unadjusted" { @@ -16125,6 +16320,7 @@ pub fn vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fminp))] pub fn vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe extern "unadjusted" { @@ -16141,6 +16337,7 @@ pub fn vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] 
+#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fminnmp))] pub fn vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe extern "unadjusted" { @@ -16157,6 +16354,7 @@ pub fn vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fminnmp))] pub fn vpminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe extern "unadjusted" { @@ -21135,6 +21333,7 @@ pub fn vrecpes_f32(a: f32) -> f32 { #[cfg_attr(test, assert_instr(frecpe))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vrecpeh_f16(a: f16) -> f16 { unsafe extern "unadjusted" { #[cfg_attr( @@ -21215,6 +21414,7 @@ pub fn vrecpss_f32(a: f32, b: f32) -> f32 { #[cfg_attr(test, assert_instr(frecps))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vrecpsh_f16(a: f16, b: f16) -> f16 { unsafe extern "unadjusted" { #[cfg_attr( @@ -21263,6 +21463,7 @@ pub fn vrecpxs_f32(a: f32) -> f32 { #[cfg_attr(test, assert_instr(frecpx))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vrecpxh_f16(a: f16) -> f16 { unsafe extern "unadjusted" { #[cfg_attr( @@ -21276,164 +21477,73 @@ pub fn vrecpxh_f16(a: f16) -> f16 { #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f16)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f16)"] -#[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - 
let ret_val: float64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f64)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t { - unsafe { - let ret_val: float16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f64)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t { - let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: float16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_p128(a: p128) -> float64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_f64_p128(a: p128) -> float64x2_t { - unsafe { - let ret_val: float64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t { unsafe { transmute(a) } } #[doc = "Vector 
reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -21441,23 +21551,8 @@ pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: float64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -21465,23 +21560,8 @@ pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -21489,22 +21569,8 @@ pub fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t { - unsafe { - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -21512,22 +21578,8 @@ pub fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t { - unsafe { - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -21535,22 +21587,8 @@ pub fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t { - unsafe { - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -21558,19 +21596,6 @@ pub fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = 
"neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t { - unsafe { - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f64)"] #[inline] #[target_feature(enable = "neon")] @@ -21582,7 +21607,6 @@ pub fn vreinterpret_s64_f64(a: float64x1_t) -> int64x1_t { #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -21590,22 +21614,8 @@ pub fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t { - unsafe { - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -21613,22 +21623,8 @@ pub fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t { - unsafe { - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -21636,19 +21632,6 @@ pub fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t { - unsafe { - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f64)"] #[inline] #[target_feature(enable = "neon")] @@ -21660,7 +21643,6 @@ pub fn vreinterpret_u64_f64(a: float64x1_t) -> uint64x1_t { #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -21668,22 +21650,8 @@ pub fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t { - unsafe { - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -21691,19 +21659,6 @@ pub fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t { - unsafe { - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f64)"] #[inline] #[target_feature(enable = "neon")] @@ -21715,7 +21670,6 @@ pub fn vreinterpret_p64_f64(a: float64x1_t) -> poly64x1_t { #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -21723,120 +21677,44 @@ pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 { - let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpretq_f32_f64(a: float64x2_t) -> 
float32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t { +pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t { - let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t { +pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t { - let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] 
-#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t { - let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t { - let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"] -#[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -21844,712 +21722,292 @@ pub fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t { - let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t { - let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t { - let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t { - let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t { - let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t { - let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t { - let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t { - let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } -} -#[doc = 
"Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: float64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: float64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: float64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s64)"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f64_s64(a: int64x1_t) -> float64x1_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s64)"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_p64_s64(a: int64x1_t) -> poly64x1_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t { +pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: float64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } 
+pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t { +pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t { +pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t { +pub fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: float64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t { +pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t { +pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: float64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t { +pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpret_f64_s64(a: int64x1_t) -> float64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t { +pub fn vreinterpret_p64_s64(a: int64x1_t) -> poly64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: float64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"] #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f64_u64(a: uint64x1_t) -> float64x1_t { +pub fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"] #[inline] #[target_feature(enable = "neon")] 
#[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_p64_u64(a: uint64x1_t) -> poly64x1_t { +pub fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t { +pub fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: float64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t { +pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t { +pub 
fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_f64_u64(a: uint64x1_t) -> float64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t { +pub fn vreinterpret_p64_u64(a: uint64x1_t) -> poly64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: float64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t { +pub fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t { +pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: float64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t { +pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t { - unsafe { - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p64)"] @@ -22581,7 +22039,6 @@ pub fn vreinterpret_u64_p64(a: poly64x1_t) -> uint64x1_t { #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -22589,23 +22046,8 @@ pub fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub 
fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -22613,23 +22055,8 @@ pub fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: float64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -22637,43 +22064,14 @@ pub fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t { unsafe { transmute(a) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} #[doc = "Floating-point round to 32-bit integer, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f32)"] #[inline] @@ -22935,6 +22333,7 @@ pub fn vrnd64z_f64(a: float64x1_t) -> float64x1_t { 
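// Editorial aside, not part of the patch: the hunks above delete the
// #[cfg(target_endian = "big")] bodies of the vreinterpret* intrinsics, which
// reversed lane order with simd_shuffle! around the transmute, and drop the
// matching #[cfg(target_endian = "little")] gate so a single endian-agnostic
// `unsafe { transmute(a) }` body remains. A minimal plain-Rust sketch of what
// such a reinterpret cast means at the value level (the names below are
// illustrative, not stdarch API):

fn reinterpret_f64x2_as_u64x2(v: [f64; 2]) -> [u64; 2] {
    // Same 128 bits viewed as two u64 lanes; no numeric conversion happens.
    [v[0].to_bits(), v[1].to_bits()]
}

fn main() {
    let bits = reinterpret_f64x2_as_u64x2([1.0, -2.0]);
    // 1.0f64 is 0x3FF0_0000_0000_0000 and -2.0f64 is 0xC000_0000_0000_0000.
    assert_eq!(bits, [0x3FF0_0000_0000_0000, 0xC000_0000_0000_0000]);
    println!("{:#x?}", bits);
}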
#[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(frintz))] pub fn vrnd_f16(a: float16x4_t) -> float16x4_t { unsafe { simd_trunc(a) } @@ -22944,6 +22343,7 @@ pub fn vrnd_f16(a: float16x4_t) -> float16x4_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(frintz))] pub fn vrndq_f16(a: float16x8_t) -> float16x8_t { unsafe { simd_trunc(a) } @@ -22989,6 +22389,7 @@ pub fn vrndq_f64(a: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(frinta))] pub fn vrnda_f16(a: float16x4_t) -> float16x4_t { unsafe { simd_round(a) } @@ -22998,6 +22399,7 @@ pub fn vrnda_f16(a: float16x4_t) -> float16x4_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(frinta))] pub fn vrndaq_f16(a: float16x8_t) -> float16x8_t { unsafe { simd_round(a) } @@ -23043,6 +22445,7 @@ pub fn vrndaq_f64(a: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(frinta))] pub fn vrndah_f16(a: f16) -> f16 { unsafe { roundf16(a) } @@ -23052,6 +22455,7 @@ pub fn vrndah_f16(a: f16) -> f16 { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(frintz))] pub fn vrndh_f16(a: f16) -> f16 { unsafe { truncf16(a) } @@ -23061,6 +22465,7 @@ pub fn vrndh_f16(a: f16) -> f16 { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(frinti))] pub fn vrndi_f16(a: float16x4_t) -> float16x4_t { unsafe extern "unadjusted" { @@ -23077,6 +22482,7 @@ pub fn vrndi_f16(a: float16x4_t) -> float16x4_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(frinti))] pub fn vrndiq_f16(a: float16x8_t) -> float16x8_t { unsafe extern "unadjusted" { @@ -23157,6 +22563,7 @@ pub fn vrndiq_f64(a: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(frinti))] pub fn vrndih_f16(a: f16) -> f16 { unsafe extern "unadjusted" { @@ -23173,6 +22580,7 @@ pub fn vrndih_f16(a: f16) -> f16 { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(frintm))] pub fn vrndm_f16(a: float16x4_t) -> float16x4_t { unsafe { simd_floor(a) } @@ -23182,6 +22590,7 @@ pub fn vrndm_f16(a: float16x4_t) -> float16x4_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(frintm))] pub fn vrndmq_f16(a: float16x8_t) -> float16x8_t { unsafe { simd_floor(a) } @@ 
-23227,6 +22636,7 @@ pub fn vrndmq_f64(a: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(frintm))] pub fn vrndmh_f16(a: f16) -> f16 { unsafe { floorf16(a) } @@ -23268,6 +22678,7 @@ pub fn vrndnq_f64(a: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(frintn))] pub fn vrndnh_f16(a: f16) -> f16 { unsafe extern "unadjusted" { @@ -23300,6 +22711,7 @@ pub fn vrndns_f32(a: f32) -> f32 { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(frintp))] pub fn vrndp_f16(a: float16x4_t) -> float16x4_t { unsafe { simd_ceil(a) } @@ -23309,6 +22721,7 @@ pub fn vrndp_f16(a: float16x4_t) -> float16x4_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(frintp))] pub fn vrndpq_f16(a: float16x8_t) -> float16x8_t { unsafe { simd_ceil(a) } @@ -23354,6 +22767,7 @@ pub fn vrndpq_f64(a: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(frintp))] pub fn vrndph_f16(a: f16) -> f16 { unsafe { ceilf16(a) } @@ -23363,6 +22777,7 @@ pub fn vrndph_f16(a: f16) -> f16 { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(frintx))] pub fn vrndx_f16(a: float16x4_t) -> float16x4_t { unsafe { simd_round_ties_even(a) } @@ -23372,6 +22787,7 @@ pub fn vrndx_f16(a: float16x4_t) -> float16x4_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(frintx))] pub fn vrndxq_f16(a: float16x8_t) -> float16x8_t { unsafe { simd_round_ties_even(a) } @@ -23417,6 +22833,7 @@ pub fn vrndxq_f64(a: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(frintx))] pub fn vrndxh_f16(a: f16) -> f16 { round_ties_even_f16(a) @@ -23623,6 +23040,7 @@ pub fn vrsqrtes_f32(a: f32) -> f32 { #[cfg_attr(test, assert_instr(frsqrte))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vrsqrteh_f16(a: f16) -> f16 { unsafe extern "unadjusted" { #[cfg_attr( @@ -23703,6 +23121,7 @@ pub fn vrsqrtss_f32(a: f32, b: f32) -> f32 { #[target_feature(enable = "neon,fp16")] #[cfg_attr(test, assert_instr(frsqrts))] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vrsqrtsh_f16(a: f16, b: f16) -> f16 { unsafe extern "unadjusted" { #[cfg_attr( @@ -24791,6 +24210,7 @@ pub fn vsqadds_u32(a: u32, b: i32) -> u32 { #[cfg_attr(test, assert_instr(fsqrt))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vsqrt_f16(a: float16x4_t) 
-> float16x4_t { unsafe { simd_fsqrt(a) } } @@ -24800,6 +24220,7 @@ pub fn vsqrt_f16(a: float16x4_t) -> float16x4_t { #[cfg_attr(test, assert_instr(fsqrt))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vsqrtq_f16(a: float16x8_t) -> float16x8_t { unsafe { simd_fsqrt(a) } } @@ -24844,6 +24265,7 @@ pub fn vsqrtq_f64(a: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fsqrt))] pub fn vsqrth_f16(a: f16) -> f16 { unsafe { sqrtf16(a) } @@ -25177,6 +24599,7 @@ pub fn vsrid_n_u64(a: u64, b: u64) -> u64 { #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vst1_f16(ptr: *mut f16, a: float16x4_t) { crate::ptr::write_unaligned(ptr.cast(), a) } @@ -25189,6 +24612,7 @@ pub unsafe fn vst1_f16(ptr: *mut f16, a: float16x4_t) { #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vst1q_f16(ptr: *mut f16, a: float16x8_t) { crate::ptr::write_unaligned(ptr.cast(), a) } @@ -26488,6 +25912,7 @@ pub fn vsubd_u64(a: u64, b: u64) -> u64 { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(nop))] pub fn vsubh_f16(a: f16, b: f16) -> f16 { a - b @@ -27283,6 +26708,7 @@ pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] pub fn vtrn1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) } @@ -27292,6 +26718,7 @@ pub fn vtrn1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] pub fn vtrn1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) } @@ -27517,6 +26944,7 @@ pub fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] pub fn vtrn2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) } @@ -27526,6 +26954,7 @@ pub fn vtrn2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] pub fn vtrn2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } @@ -28056,6 +27485,7 @@ pub fn vusdotq_laneq_s32(a: int32x4_t, b: uint8x16_t, c: int8x1 #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature 
= "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] pub fn vuzp1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) } @@ -28065,6 +27495,7 @@ pub fn vuzp1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] pub fn vuzp1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } @@ -28290,6 +27721,7 @@ pub fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] pub fn vuzp2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) } @@ -28299,6 +27731,7 @@ pub fn vuzp2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] pub fn vuzp2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } @@ -28542,6 +27975,7 @@ pub fn vxarq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] pub fn vzip1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) } @@ -28551,6 +27985,7 @@ pub fn vzip1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] pub fn vzip1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } @@ -28776,6 +28211,7 @@ pub fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] pub fn vzip2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) } @@ -28785,6 +28221,7 @@ pub fn vzip2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { #[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] pub fn vzip2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs index 32531c7da1356..e4e4e040f468d 100644 --- a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs +++ 
b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs @@ -820,6 +820,7 @@ pub fn vabaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vabd_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v4f16")] @@ -842,6 +843,7 @@ pub fn vabd_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vabdq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v8f16")] @@ -1405,6 +1407,7 @@ pub fn vabdl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vabs_f16(a: float16x4_t) -> float16x4_t { unsafe { simd_fabs(a) } } @@ -1419,6 +1422,7 @@ pub fn vabs_f16(a: float16x4_t) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vabsq_f16(a: float16x8_t) -> float16x8_t { unsafe { simd_fabs(a) } } @@ -1625,6 +1629,7 @@ pub fn vabsq_s32(a: int32x4_t) -> int32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vabsh_f16(a: f16) -> f16 { unsafe { simd_extract!(vabs_f16(vdup_n_f16(a)), 0) } } @@ -1639,6 +1644,7 @@ pub fn vabsh_f16(a: f16) -> f16 { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vadd_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe { simd_add(a, b) } } @@ -1653,6 +1659,7 @@ pub fn vadd_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vaddq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe { simd_add(a, b) } } @@ -2129,6 +2136,7 @@ pub fn vaddq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vaddh_f16(a: f16, b: f16) -> f16 { a + b } @@ -3828,6 +3836,7 @@ pub fn vbicq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { assert_instr(bsl) )] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vbsl_f16(a: uint16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t { let not = int16x4_t::splat(-1); unsafe { @@ -3848,6 +3857,7 @@ pub fn vbsl_f16(a: uint16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t { assert_instr(bsl) )] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vbslq_f16(a: uint16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t { let not = int16x8_t::splat(-1); unsafe { @@ -4462,6 +4472,7 @@ pub fn vbslq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub 
fn vcage_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v4i16.v4f16")] @@ -4484,6 +4495,7 @@ pub fn vcage_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcageq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v8i16.v8f16")] @@ -4564,6 +4576,7 @@ pub fn vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcagt_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v4i16.v4f16")] @@ -4586,6 +4599,7 @@ pub fn vcagt_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcagtq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v8i16.v8f16")] @@ -4666,6 +4680,7 @@ pub fn vcagtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcale_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { vcage_f16(b, a) } @@ -4680,6 +4695,7 @@ pub fn vcale_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcaleq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { vcageq_f16(b, a) } @@ -4736,6 +4752,7 @@ pub fn vcaleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcalt_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { vcagt_f16(b, a) } @@ -4750,6 +4767,7 @@ pub fn vcalt_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcaltq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { vcagtq_f16(b, a) } @@ -4806,6 +4824,7 @@ pub fn vcaltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vceq_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { unsafe { simd_eq(a, b) } } @@ -4820,6 +4839,7 @@ pub fn vceq_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vceqq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { unsafe { simd_eq(a, b) } } @@ -5170,6 +5190,7 @@ pub fn vceqq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcge_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t 
{ unsafe { simd_ge(a, b) } } @@ -5184,6 +5205,7 @@ pub fn vcge_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcgeq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { unsafe { simd_ge(a, b) } } @@ -5492,6 +5514,7 @@ pub fn vcgeq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcgez_f16(a: float16x4_t) -> uint16x4_t { let b: f16x4 = f16x4::new(0.0, 0.0, 0.0, 0.0); unsafe { simd_ge(a, transmute(b)) } @@ -5507,6 +5530,7 @@ pub fn vcgez_f16(a: float16x4_t) -> uint16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcgezq_f16(a: float16x8_t) -> uint16x8_t { let b: f16x8 = f16x8::new(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0); unsafe { simd_ge(a, transmute(b)) } @@ -5522,6 +5546,7 @@ pub fn vcgezq_f16(a: float16x8_t) -> uint16x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcgt_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { unsafe { simd_gt(a, b) } } @@ -5536,6 +5561,7 @@ pub fn vcgt_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcgtq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { unsafe { simd_gt(a, b) } } @@ -5844,6 +5870,7 @@ pub fn vcgtq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcgtz_f16(a: float16x4_t) -> uint16x4_t { let b: f16x4 = f16x4::new(0.0, 0.0, 0.0, 0.0); unsafe { simd_gt(a, transmute(b)) } @@ -5859,6 +5886,7 @@ pub fn vcgtz_f16(a: float16x4_t) -> uint16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcgtzq_f16(a: float16x8_t) -> uint16x8_t { let b: f16x8 = f16x8::new(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0); unsafe { simd_gt(a, transmute(b)) } @@ -5874,6 +5902,7 @@ pub fn vcgtzq_f16(a: float16x8_t) -> uint16x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcle_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { unsafe { simd_le(a, b) } } @@ -5888,6 +5917,7 @@ pub fn vcle_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcleq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { unsafe { simd_le(a, b) } } @@ -6196,6 +6226,7 @@ pub fn vcleq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vclez_f16(a: float16x4_t) -> uint16x4_t { let b: f16x4 = f16x4::new(0.0, 0.0, 0.0, 0.0); unsafe { simd_le(a, transmute(b)) } @@ -6211,6 +6242,7 @@ pub fn vclez_f16(a: float16x4_t) -> uint16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", 
issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vclezq_f16(a: float16x8_t) -> uint16x8_t { let b: f16x8 = f16x8::new(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0); unsafe { simd_le(a, transmute(b)) } @@ -6526,6 +6558,7 @@ pub fn vclsq_u32(a: uint32x4_t) -> int32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vclt_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { unsafe { simd_lt(a, b) } } @@ -6540,6 +6573,7 @@ pub fn vclt_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcltq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { unsafe { simd_lt(a, b) } } @@ -6848,6 +6882,7 @@ pub fn vcltq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcltz_f16(a: float16x4_t) -> uint16x4_t { let b: f16x4 = f16x4::new(0.0, 0.0, 0.0, 0.0); unsafe { simd_lt(a, transmute(b)) } @@ -6863,6 +6898,7 @@ pub fn vcltz_f16(a: float16x4_t) -> uint16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcltzq_f16(a: float16x8_t) -> uint16x8_t { let b: f16x8 = f16x8::new(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0); unsafe { simd_lt(a, transmute(b)) } @@ -7536,6 +7572,7 @@ pub fn vcntq_p8(a: poly8x16_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(nop))] pub fn vcombine_f16(a: float16x4_t, b: float16x4_t) -> float16x8_t { unsafe { simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]) } @@ -7756,6 +7793,7 @@ pub fn vcombine_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcreate_f16(a: u64) -> float16x4_t { unsafe { transmute(a) } } @@ -7771,6 +7809,7 @@ pub fn vcreate_f16(a: u64) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcreate_f16(a: u64) -> float16x4_t { unsafe { let ret_val: float16x4_t = transmute(a); @@ -8274,6 +8313,7 @@ pub fn vcreate_p64(a: u64) -> poly64x1_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvt_f16_f32(a: float32x4_t) -> float16x4_t { unsafe { simd_cast(a) } } @@ -8288,6 +8328,7 @@ pub fn vcvt_f16_f32(a: float32x4_t) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvt_f16_s16(a: int16x4_t) -> float16x4_t { unsafe { simd_cast(a) } } @@ -8302,6 +8343,7 @@ pub fn vcvt_f16_s16(a: int16x4_t) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtq_f16_s16(a: int16x8_t) -> float16x8_t { unsafe { simd_cast(a) } } @@ -8316,6 +8358,7 @@ pub fn vcvtq_f16_s16(a: int16x8_t) -> float16x8_t { )] 
#[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvt_f16_u16(a: uint16x4_t) -> float16x4_t { unsafe { simd_cast(a) } } @@ -8330,6 +8373,7 @@ pub fn vcvt_f16_u16(a: uint16x4_t) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtq_f16_u16(a: uint16x8_t) -> float16x8_t { unsafe { simd_cast(a) } } @@ -8344,6 +8388,7 @@ pub fn vcvtq_f16_u16(a: uint16x8_t) -> float16x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvt_f32_f16(a: float16x4_t) -> float32x4_t { unsafe { simd_cast(a) } } @@ -8443,6 +8488,7 @@ pub fn vcvtq_f32_u32(a: uint32x4_t) -> float32x4_t { #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvt_n_f16_s16(a: int16x4_t) -> float16x4_t { static_assert!(N >= 1 && N <= 16); unsafe extern "unadjusted" { @@ -8470,6 +8516,7 @@ pub fn vcvt_n_f16_s16(a: int16x4_t) -> float16x4_t { #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtq_n_f16_s16(a: int16x8_t) -> float16x8_t { static_assert!(N >= 1 && N <= 16); unsafe extern "unadjusted" { @@ -8497,6 +8544,7 @@ pub fn vcvtq_n_f16_s16(a: int16x8_t) -> float16x8_t { #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvt_n_f16_u16(a: uint16x4_t) -> float16x4_t { static_assert!(N >= 1 && N <= 16); unsafe extern "unadjusted" { @@ -8524,6 +8572,7 @@ pub fn vcvt_n_f16_u16(a: uint16x4_t) -> float16x4_t { #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtq_n_f16_u16(a: uint16x8_t) -> float16x8_t { static_assert!(N >= 1 && N <= 16); unsafe extern "unadjusted" { @@ -8703,6 +8752,7 @@ pub fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvt_n_s16_f16(a: float16x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); unsafe extern "unadjusted" { @@ -8730,6 +8780,7 @@ pub fn vcvt_n_s16_f16(a: float16x4_t) -> int16x4_t { #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtq_n_s16_f16(a: float16x8_t) -> int16x8_t { static_assert!(N >= 1 && N <= 16); unsafe extern "unadjusted" { @@ -8833,6 +8884,7 @@ pub fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvt_n_u16_f16(a: float16x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); unsafe extern "unadjusted" { @@ -8860,6 +8912,7 @@ pub fn vcvt_n_u16_f16(a: float16x4_t) -> uint16x4_t { #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", 
issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtq_n_u16_f16(a: float16x8_t) -> uint16x8_t { static_assert!(N >= 1 && N <= 16); unsafe extern "unadjusted" { @@ -8962,6 +9015,7 @@ pub fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvt_s16_f16(a: float16x4_t) -> int16x4_t { unsafe { simd_cast(a) } } @@ -8976,6 +9030,7 @@ pub fn vcvt_s16_f16(a: float16x4_t) -> int16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtq_s16_f16(a: float16x8_t) -> int16x8_t { unsafe { simd_cast(a) } } @@ -9048,6 +9103,7 @@ pub fn vcvtq_s32_f32(a: float32x4_t) -> int32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvt_u16_f16(a: float16x4_t) -> uint16x4_t { unsafe { simd_cast(a) } } @@ -9062,6 +9118,7 @@ pub fn vcvt_u16_f16(a: float16x4_t) -> uint16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vcvtq_u16_f16(a: float16x8_t) -> uint16x8_t { unsafe { simd_cast(a) } } @@ -9361,6 +9418,7 @@ pub fn vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vdup_lane_f16(a: float16x4_t) -> float16x4_t { static_assert_uimm_bits!(N, 2); unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } @@ -9377,6 +9435,7 @@ pub fn vdup_lane_f16(a: float16x4_t) -> float16x4_t { #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vdupq_lane_f16(a: float16x4_t) -> float16x8_t { static_assert_uimm_bits!(N, 2); unsafe { @@ -9922,6 +9981,7 @@ pub fn vdup_lane_u64(a: uint64x1_t) -> uint64x1_t { #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vdup_laneq_f16(a: float16x8_t) -> float16x4_t { static_assert_uimm_bits!(N, 3); unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } @@ -9938,6 +9998,7 @@ pub fn vdup_laneq_f16(a: float16x8_t) -> float16x4_t { #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vdupq_laneq_f16(a: float16x8_t) -> float16x8_t { static_assert_uimm_bits!(N, 3); unsafe { @@ -10482,6 +10543,7 @@ pub fn vdup_laneq_u64(a: uint64x2_t) -> uint64x1_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vdup_n_f16(a: f16) -> float16x4_t { float16x4_t::splat(a) } @@ -10496,6 +10558,7 @@ pub fn vdup_n_f16(a: f16) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vdupq_n_f16(a: f16) -> float16x8_t { float16x8_t::splat(a) } @@ -11443,6 +11506,7 @@ pub fn veorq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { 
#[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vext_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { static_assert_uimm_bits!(N, 2); unsafe { @@ -11814,6 +11878,7 @@ pub fn vextq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vextq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { static_assert_uimm_bits!(N, 3); unsafe { @@ -12394,6 +12459,7 @@ pub fn vextq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfma_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t { unsafe { simd_fma(b, c, a) } } @@ -12408,6 +12474,7 @@ pub fn vfma_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfmaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t { unsafe { simd_fma(b, c, a) } } @@ -12507,6 +12574,7 @@ pub fn vfmaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfms_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t { unsafe { let b: float16x4_t = simd_neg(b); @@ -12525,6 +12593,7 @@ pub fn vfms_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vfmsq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t { unsafe { let b: float16x8_t = simd_neg(b); @@ -12627,6 +12696,7 @@ pub fn vfmsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(nop))] pub fn vget_high_f16(a: float16x8_t) -> float16x4_t { unsafe { simd_shuffle!(a, a, [4, 5, 6, 7]) } @@ -12637,6 +12707,7 @@ pub fn vget_high_f16(a: float16x8_t) -> float16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(nop))] pub fn vget_low_f16(a: float16x8_t) -> float16x4_t { unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) } @@ -12884,6 +12955,7 @@ pub fn vget_high_u64(a: uint64x2_t) -> uint64x1_t { )] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vget_lane_f16(a: float16x4_t) -> f16 { static_assert_uimm_bits!(LANE, 2); unsafe { simd_extract!(a, LANE as u32) } @@ -12900,6 +12972,7 @@ pub fn vget_lane_f16(a: float16x4_t) -> f16 { )] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vgetq_lane_f16(a: float16x8_t) -> f16 { static_assert_uimm_bits!(LANE, 3); 
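vfms_f16 and vfmsq_f16 above are built from the fused multiply-add primitive with the first factor negated, i.e. a - b*c computed as fma(-b, c, a). A scalar sketch of the same identity using f32::mul_add (the name is illustrative):

fn vfms_model(a: f32, b: f32, c: f32) -> f32 {
    // a - b*c, written the way the intrinsic writes it: fma(-b, c, a).
    (-b).mul_add(c, a)
}

fn main() {
    assert_eq!(vfms_model(10.0, 2.0, 3.0), 4.0); // 10 - 2*3
}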
unsafe { simd_extract!(a, LANE as u32) } @@ -14256,6 +14329,7 @@ pub fn vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld1_dup_f16(ptr: *const f16) -> float16x4_t { let x: float16x4_t = vld1_lane_f16::<0>(ptr, transmute(f16x4::splat(0.0))); simd_shuffle!(x, x, [0, 0, 0, 0]) @@ -14273,6 +14347,7 @@ pub unsafe fn vld1_dup_f16(ptr: *const f16) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld1q_dup_f16(ptr: *const f16) -> float16x8_t { let x: float16x8_t = vld1q_lane_f16::<0>(ptr, transmute(f16x8::splat(0.0))); simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) @@ -14843,6 +14918,7 @@ pub unsafe fn vld1_dup_u64(ptr: *const u64) -> uint64x1_t { #[target_feature(enable = "neon,v7")] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] pub unsafe fn vld1_f16(ptr: *const f16) -> float16x4_t { transmute(vld1_v4f16( @@ -14860,6 +14936,7 @@ pub unsafe fn vld1_f16(ptr: *const f16) -> float16x4_t { #[target_feature(enable = "neon,v7")] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] pub unsafe fn vld1_f16(ptr: *const f16) -> float16x4_t { let ret_val: float16x4_t = transmute(vld1_v4f16( @@ -14878,6 +14955,7 @@ pub unsafe fn vld1_f16(ptr: *const f16) -> float16x4_t { #[target_feature(enable = "neon,v7")] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] pub unsafe fn vld1q_f16(ptr: *const f16) -> float16x8_t { transmute(vld1q_v8f16( @@ -14895,6 +14973,7 @@ pub unsafe fn vld1q_f16(ptr: *const f16) -> float16x8_t { #[target_feature(enable = "neon,v7")] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] pub unsafe fn vld1q_f16(ptr: *const f16) -> float16x8_t { let ret_val: float16x8_t = transmute(vld1q_v8f16( @@ -14916,6 +14995,7 @@ pub unsafe fn vld1q_f16(ptr: *const f16) -> float16x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld1_f16_x2(a: *const f16) -> float16x4x2_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -14940,6 +15020,7 @@ pub unsafe fn vld1_f16_x2(a: *const f16) -> float16x4x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld1_f16_x3(a: *const f16) -> float16x4x3_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -14964,6 +15045,7 @@ pub unsafe fn vld1_f16_x3(a: *const f16) -> float16x4x3_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld1_f16_x4(a: *const f16) -> float16x4x4_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -14988,6 +15070,7 @@ pub unsafe fn 
vld1_f16_x4(a: *const f16) -> float16x4x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld1q_f16_x2(a: *const f16) -> float16x8x2_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -15012,6 +15095,7 @@ pub unsafe fn vld1q_f16_x2(a: *const f16) -> float16x8x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld1q_f16_x3(a: *const f16) -> float16x8x3_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -15036,6 +15120,7 @@ pub unsafe fn vld1q_f16_x3(a: *const f16) -> float16x8x3_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld1q_f16_x4(a: *const f16) -> float16x8x4_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -15732,6 +15817,7 @@ pub unsafe fn vld1q_f32_x4(a: *const f32) -> float32x4x4_t { #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld1_lane_f16(ptr: *const f16, src: float16x4_t) -> float16x4_t { static_assert_uimm_bits!(LANE, 2); simd_insert!(src, LANE as u32, *ptr) @@ -15750,6 +15836,7 @@ pub unsafe fn vld1_lane_f16(ptr: *const f16, src: float16x4_t) #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld1q_lane_f16(ptr: *const f16, src: float16x8_t) -> float16x8_t { static_assert_uimm_bits!(LANE, 3); simd_insert!(src, LANE as u32, *ptr) @@ -19490,6 +19577,7 @@ unsafe fn vld1q_v8i16(a: *const i8, b: i32) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] unsafe fn vld1_v4f16(a: *const i8, b: i32) -> float16x4_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v4f16")] @@ -19507,6 +19595,7 @@ unsafe fn vld1_v4f16(a: *const i8, b: i32) -> float16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] unsafe fn vld1q_v8f16(a: *const i8, b: i32) -> float16x8_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v8f16")] @@ -19548,6 +19637,7 @@ pub unsafe fn vld1q_dup_p64(ptr: *const p64) -> poly64x2_t { #[target_feature(enable = "neon,fp16")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld2_dup_f16(a: *const f16) -> float16x4x2_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4f16.p0")] @@ -19565,6 +19655,7 @@ pub unsafe fn vld2_dup_f16(a: *const f16) -> float16x4x2_t { #[target_feature(enable = "neon,fp16")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld2q_dup_f16(a: *const f16) -> float16x8x2_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = 
"llvm.arm.neon.vld2dup.v8f16.p0")] @@ -19584,6 +19675,7 @@ pub unsafe fn vld2q_dup_f16(a: *const f16) -> float16x8x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld2_dup_f16(a: *const f16) -> float16x4x2_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -19606,6 +19698,7 @@ pub unsafe fn vld2_dup_f16(a: *const f16) -> float16x4x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld2q_dup_f16(a: *const f16) -> float16x8x2_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -20521,6 +20614,7 @@ pub unsafe fn vld2q_dup_p16(a: *const p16) -> poly16x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld2_f16(a: *const f16) -> float16x4x2_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4f16.p0")] @@ -20538,6 +20632,7 @@ pub unsafe fn vld2_f16(a: *const f16) -> float16x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld2q_f16(a: *const f16) -> float16x8x2_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8f16.p0")] @@ -20557,6 +20652,7 @@ pub unsafe fn vld2q_f16(a: *const f16) -> float16x8x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld2_f16(a: *const f16) -> float16x4x2_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -20579,6 +20675,7 @@ pub unsafe fn vld2_f16(a: *const f16) -> float16x4x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld2q_f16(a: *const f16) -> float16x8x2_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -20880,6 +20977,7 @@ pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld2_lane_f16(a: *const f16, b: float16x4x2_t) -> float16x4x2_t { static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { @@ -20905,6 +21003,7 @@ pub unsafe fn vld2_lane_f16(a: *const f16, b: float16x4x2_t) -> #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld2q_lane_f16(a: *const f16, b: float16x8x2_t) -> float16x8x2_t { static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { @@ -20932,6 +21031,7 @@ pub unsafe fn vld2q_lane_f16(a: *const f16, b: float16x8x2_t) - #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld2_lane_f16(a: *const f16, b: float16x4x2_t) -> float16x4x2_t { static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { @@ -20957,6 +21057,7 @@ pub unsafe fn vld2_lane_f16(a: *const f16, b: float16x4x2_t) -> 
#[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld2q_lane_f16(a: *const f16, b: float16x8x2_t) -> float16x8x2_t { static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { @@ -22109,6 +22210,7 @@ pub unsafe fn vld2q_p16(a: *const p16) -> poly16x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld3_dup_f16(a: *const f16) -> float16x4x3_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4f16.p0")] @@ -22126,6 +22228,7 @@ pub unsafe fn vld3_dup_f16(a: *const f16) -> float16x4x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld3q_dup_f16(a: *const f16) -> float16x8x3_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8f16.p0")] @@ -22145,6 +22248,7 @@ pub unsafe fn vld3q_dup_f16(a: *const f16) -> float16x8x3_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld3_dup_f16(a: *const f16) -> float16x4x3_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -22167,6 +22271,7 @@ pub unsafe fn vld3_dup_f16(a: *const f16) -> float16x4x3_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld3q_dup_f16(a: *const f16) -> float16x8x3_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -23104,6 +23209,7 @@ pub unsafe fn vld3q_dup_p16(a: *const p16) -> poly16x8x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld3_f16(a: *const f16) -> float16x4x3_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4f16.p0")] @@ -23121,6 +23227,7 @@ pub unsafe fn vld3_f16(a: *const f16) -> float16x4x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld3q_f16(a: *const f16) -> float16x8x3_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8f16.p0")] @@ -23140,6 +23247,7 @@ pub unsafe fn vld3q_f16(a: *const f16) -> float16x8x3_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld3_f16(a: *const f16) -> float16x4x3_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -23162,6 +23270,7 @@ pub unsafe fn vld3_f16(a: *const f16) -> float16x4x3_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld3q_f16(a: *const f16) -> float16x8x3_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -23463,6 +23572,7 @@ pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { #[rustc_legacy_const_generics(2)] 
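The vld2_* hunks above (and the vld3_*/vld4_* hunks that follow) bind LLVM's de-interleaving structure loads: vld2 reads 2*N consecutive elements and returns the even-indexed elements in one vector and the odd-indexed elements in the other. A safe scalar model of that layout, with illustrative names and f32 lanes:

fn vld2_model(src: &[f32; 8]) -> ([f32; 4], [f32; 4]) {
    let mut first = [0.0f32; 4];
    let mut second = [0.0f32; 4];
    for i in 0..4 {
        first[i] = src[2 * i];      // lane i of the first result vector
        second[i] = src[2 * i + 1]; // lane i of the second result vector
    }
    (first, second)
}

fn main() {
    let (a, b) = vld2_model(&[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]);
    assert_eq!(a, [0.0, 2.0, 4.0, 6.0]);
    assert_eq!(b, [1.0, 3.0, 5.0, 7.0]);
}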
#[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld3_lane_f16(a: *const f16, b: float16x4x3_t) -> float16x4x3_t { static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { @@ -23489,6 +23599,7 @@ pub unsafe fn vld3_lane_f16(a: *const f16, b: float16x4x3_t) -> #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld3q_lane_f16(a: *const f16, b: float16x8x3_t) -> float16x8x3_t { static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { @@ -23517,6 +23628,7 @@ pub unsafe fn vld3q_lane_f16(a: *const f16, b: float16x8x3_t) - #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld3_lane_f16(a: *const f16, b: float16x4x3_t) -> float16x4x3_t { static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { @@ -23547,6 +23659,7 @@ pub unsafe fn vld3_lane_f16(a: *const f16, b: float16x4x3_t) -> #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld3q_lane_f16(a: *const f16, b: float16x8x3_t) -> float16x8x3_t { static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { @@ -24775,6 +24888,7 @@ pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) - #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld4_dup_f16(a: *const f16) -> float16x4x4_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4f16.p0")] @@ -24792,6 +24906,7 @@ pub unsafe fn vld4_dup_f16(a: *const f16) -> float16x4x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld4q_dup_f16(a: *const f16) -> float16x8x4_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8f16.p0")] @@ -24811,6 +24926,7 @@ pub unsafe fn vld4q_dup_f16(a: *const f16) -> float16x8x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld4_dup_f16(a: *const f16) -> float16x4x4_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -24833,6 +24949,7 @@ pub unsafe fn vld4_dup_f16(a: *const f16) -> float16x4x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld4q_dup_f16(a: *const f16) -> float16x8x4_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -25792,6 +25909,7 @@ pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld4_f16(a: *const f16) -> float16x4x4_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4f16.p0")] @@ -25809,6 +25927,7 @@ pub unsafe 
fn vld4_f16(a: *const f16) -> float16x4x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld4q_f16(a: *const f16) -> float16x8x4_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8f16.p0")] @@ -25828,6 +25947,7 @@ pub unsafe fn vld4q_f16(a: *const f16) -> float16x8x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld4_f16(a: *const f16) -> float16x4x4_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -25850,6 +25970,7 @@ pub unsafe fn vld4_f16(a: *const f16) -> float16x4x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld4q_f16(a: *const f16) -> float16x8x4_t { unsafe extern "unadjusted" { #[cfg_attr( @@ -26151,6 +26272,7 @@ pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld4_lane_f16(a: *const f16, b: float16x4x4_t) -> float16x4x4_t { static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { @@ -26178,6 +26300,7 @@ pub unsafe fn vld4_lane_f16(a: *const f16, b: float16x4x4_t) -> #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld4q_lane_f16(a: *const f16, b: float16x8x4_t) -> float16x8x4_t { static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { @@ -26207,6 +26330,7 @@ pub unsafe fn vld4q_lane_f16(a: *const f16, b: float16x8x4_t) - #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld4_lane_f16(a: *const f16, b: float16x4x4_t) -> float16x4x4_t { static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { @@ -26238,6 +26362,7 @@ pub unsafe fn vld4_lane_f16(a: *const f16, b: float16x4x4_t) -> #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vld4q_lane_f16(a: *const f16, b: float16x8x4_t) -> float16x8x4_t { static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { @@ -27527,6 +27652,7 @@ pub unsafe fn vldrq_p128(a: *const p128) -> p128 { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4f16")] @@ -27549,6 +27675,7 @@ pub fn vmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8f16")] @@ -27917,6 +28044,7 @@ pub fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { )] 
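The vld4_lane_* hunks above are the single-structure, single-lane variants: they load four consecutive elements and write element k into lane LANE of the k-th vector of the tuple passed in, leaving every other lane as it was. Scalar sketch with illustrative names and f32 lanes (the real lane parameter is a <const LANE: i32> generic):

fn vld4_lane_model<const LANE: usize>(
    src: &[f32; 4],
    mut acc: [[f32; 4]; 4],
) -> [[f32; 4]; 4] {
    // Mirrors static_assert_uimm_bits!(LANE, 2): the lane index must fit in 2 bits.
    const { assert!(LANE < 4) };
    for k in 0..4 {
        acc[k][LANE] = src[k];
    }
    acc
}

fn main() {
    let r = vld4_lane_model::<2>(&[1.0, 2.0, 3.0, 4.0], [[0.0; 4]; 4]);
    assert_eq!(r[1][2], 2.0); // element 1 landed in lane 2 of vector 1
}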
#[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe { simd_fmax(a, b) } } @@ -27931,6 +28059,7 @@ pub fn vmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe { simd_fmax(a, b) } } @@ -27987,6 +28116,7 @@ pub fn vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4f16")] @@ -28009,6 +28139,7 @@ pub fn vmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8f16")] @@ -28377,6 +28508,7 @@ pub fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe { simd_fmin(a, b) } } @@ -28391,6 +28523,7 @@ pub fn vminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe { simd_fmin(a, b) } } @@ -31573,6 +31706,7 @@ pub fn vmmlaq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vmov_n_f16(a: f16) -> float16x4_t { vdup_n_f16(a) } @@ -31587,6 +31721,7 @@ pub fn vmov_n_f16(a: f16) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vmovq_n_f16(a: f16) -> float16x8_t { vdupq_n_f16(a) } @@ -32315,6 +32450,7 @@ pub fn vmovn_u64(a: uint64x2_t) -> uint32x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vmul_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe { simd_mul(a, b) } } @@ -32329,6 +32465,7 @@ pub fn vmul_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vmulq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe { simd_mul(a, b) } } @@ -32386,6 +32523,7 @@ pub fn vmulq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vmul_lane_f16(a: float16x4_t, v: float16x4_t) -> float16x4_t { 
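vmaxnm_f16/vmaxnmq_f16 and vminnm_f16/vminnmq_f16 above lower to the generic simd_fmax/simd_fmin intrinsics, i.e. the IEEE "number-favouring" min/max where a lone NaN lane yields the numeric operand rather than NaN. f32::max and f32::min share that single-NaN behaviour, so a scalar model (illustrative name) is:

fn vmaxnm_model(a: [f32; 4], b: [f32; 4]) -> [f32; 4] {
    // Lane-wise max; a lone NaN in a lane is ignored in favour of the number.
    core::array::from_fn(|i| a[i].max(b[i]))
}

fn main() {
    let r = vmaxnm_model([1.0, f32::NAN, 3.0, 4.0], [2.0, 5.0, f32::NAN, 0.0]);
    assert_eq!(r[0], 2.0);
    assert_eq!(r[1], 5.0); // NaN lane resolved to the numeric operand
    assert_eq!(r[2], 3.0);
}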
static_assert_uimm_bits!(LANE, 2); unsafe { @@ -32407,6 +32545,7 @@ pub fn vmul_lane_f16(a: float16x4_t, v: float16x4_t) -> float16 #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vmulq_lane_f16(a: float16x8_t, v: float16x4_t) -> float16x8_t { static_assert_uimm_bits!(LANE, 2); unsafe { @@ -33022,6 +33161,7 @@ pub fn vmulq_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vmul_n_f16(a: float16x4_t, b: f16) -> float16x4_t { unsafe { simd_mul(a, vdup_n_f16(b)) } } @@ -33036,6 +33176,7 @@ pub fn vmul_n_f16(a: float16x4_t, b: f16) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vmulq_n_f16(a: float16x8_t, b: f16) -> float16x8_t { unsafe { simd_mul(a, vdupq_n_f16(b)) } } @@ -34369,6 +34510,7 @@ pub fn vmvnq_u8(a: uint8x16_t) -> uint8x16_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vneg_f16(a: float16x4_t) -> float16x4_t { unsafe { simd_neg(a) } } @@ -34383,6 +34525,7 @@ pub fn vneg_f16(a: float16x4_t) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vnegq_f16(a: float16x8_t) -> float16x8_t { unsafe { simd_neg(a) } } @@ -35613,6 +35756,7 @@ pub fn vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vpadd_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v4f16")] @@ -36834,15 +36978,7 @@ pub fn vqabsq_s32(a: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v8i8")] - fn _vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - unsafe { _vqadd_s8(a, b) } + unsafe { simd_saturating_add(a, b) } } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s8)"] @@ -36863,15 +36999,7 @@ pub fn vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v16i8")] - fn _vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - unsafe { _vqaddq_s8(a, b) } + unsafe { simd_saturating_add(a, b) } } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s16)"] @@ -36892,15 +37020,7 @@ pub fn vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v4i16")] - fn _vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - unsafe { _vqadd_s16(a, b) } + unsafe { simd_saturating_add(a, b) } } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s16)"] @@ -36921,15 +37041,7 @@ pub fn vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v8i16")] - fn _vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - unsafe { _vqaddq_s16(a, b) } + unsafe { simd_saturating_add(a, b) } } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s32)"] @@ -36950,15 +37062,7 @@ pub fn vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v2i32")] - fn _vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - unsafe { _vqadd_s32(a, b) } + unsafe { simd_saturating_add(a, b) } } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s32)"] @@ -36979,15 +37083,7 @@ pub fn vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v4i32")] - fn _vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - unsafe { _vqaddq_s32(a, b) } + unsafe { simd_saturating_add(a, b) } } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s64)"] @@ -37008,15 +37104,7 @@ pub fn vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.v1i64" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v1i64")] - fn _vqadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - } - unsafe { _vqadd_s64(a, b) } + unsafe { simd_saturating_add(a, b) } } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s64)"] @@ -37037,15 +37125,7 @@ pub fn vqadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqadd.v2i64" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v2i64")] - fn _vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - unsafe { _vqaddq_s64(a, b) } + unsafe { simd_saturating_add(a, b) } } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u8)"] @@ -37066,15 +37146,7 @@ pub fn vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v8i8")] - fn _vqadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - } - unsafe { _vqadd_u8(a, b) } + unsafe { simd_saturating_add(a, b) } } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u8)"] @@ -37095,15 +37167,7 @@ pub fn vqadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v16i8")] - fn _vqaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - } - unsafe { _vqaddq_u8(a, b) } + unsafe { simd_saturating_add(a, b) } } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u16)"] @@ -37124,15 +37188,7 @@ pub fn vqaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v4i16")] - fn _vqadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - } - unsafe { _vqadd_u16(a, b) } + unsafe { simd_saturating_add(a, b) } } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u16)"] @@ -37153,15 +37209,7 @@ pub fn vqadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v8i16")] - fn _vqaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - } - unsafe { _vqaddq_u16(a, b) } + unsafe { simd_saturating_add(a, b) } } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u32)"] @@ -37182,15 +37230,7 @@ pub fn vqaddq_u16(a: uint16x8_t, b: 
uint16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v2i32")] - fn _vqadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - } - unsafe { _vqadd_u32(a, b) } + unsafe { simd_saturating_add(a, b) } } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u32)"] @@ -37211,15 +37251,7 @@ pub fn vqadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v4i32")] - fn _vqaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - } - unsafe { _vqaddq_u32(a, b) } + unsafe { simd_saturating_add(a, b) } } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u64)"] @@ -37240,15 +37272,7 @@ pub fn vqaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.v1i64" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v1i64")] - fn _vqadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t; - } - unsafe { _vqadd_u64(a, b) } + unsafe { simd_saturating_add(a, b) } } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u64)"] @@ -37269,15 +37293,7 @@ pub fn vqadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqadd.v2i64" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v2i64")] - fn _vqaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; - } - unsafe { _vqaddq_u64(a, b) } + unsafe { simd_saturating_add(a, b) } } #[doc = "Vector widening saturating doubling multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_lane_s16)"] @@ -41115,15 +41131,7 @@ pub fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v8i8")] - fn _vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - unsafe { _vqsub_s8(a, b) } + unsafe { simd_saturating_sub(a, b) } } #[doc = "Saturating subtract"] #[doc = "[Arm's 
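The vqadd_* hunks above (and the vqsub_* hunks that follow) drop the per-width extern LLVM bindings (sqadd/uqadd, sadd.sat/uadd.sat and their sub counterparts) in favour of the generic simd_saturating_add and simd_saturating_sub intrinsics; the lane-wise saturating semantics are unchanged. A stable-Rust scalar model of the unsigned 8-bit case, with an illustrative name:

fn vqadd_u8_model(a: [u8; 8], b: [u8; 8]) -> [u8; 8] {
    let mut out = [0u8; 8];
    for i in 0..8 {
        // Clamp at u8::MAX instead of wrapping; the vqsub_* counterparts use
        // saturating_sub, clamping at zero (unsigned) or at i*::MIN/MAX (signed).
        out[i] = a[i].saturating_add(b[i]);
    }
    out
}

fn main() {
    assert_eq!(vqadd_u8_model([250; 8], [10; 8]), [255; 8]); // every lane saturates
}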
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s8)"] @@ -41144,15 +41152,7 @@ pub fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v16i8")] - fn _vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - unsafe { _vqsubq_s8(a, b) } + unsafe { simd_saturating_sub(a, b) } } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s16)"] @@ -41173,15 +41173,7 @@ pub fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v4i16")] - fn _vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - unsafe { _vqsub_s16(a, b) } + unsafe { simd_saturating_sub(a, b) } } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s16)"] @@ -41202,15 +41194,7 @@ pub fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v8i16")] - fn _vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - unsafe { _vqsubq_s16(a, b) } + unsafe { simd_saturating_sub(a, b) } } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s32)"] @@ -41231,15 +41215,7 @@ pub fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v2i32")] - fn _vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - unsafe { _vqsub_s32(a, b) } + unsafe { simd_saturating_sub(a, b) } } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s32)"] @@ -41260,15 +41236,7 @@ pub fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v4i32")] - fn _vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - unsafe { _vqsubq_s32(a, b) } + unsafe { simd_saturating_sub(a, b) } } #[doc = "Saturating 
subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s64)"] @@ -41289,15 +41257,7 @@ pub fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v1i64" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v1i64")] - fn _vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - } - unsafe { _vqsub_s64(a, b) } + unsafe { simd_saturating_sub(a, b) } } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s64)"] @@ -41318,15 +41278,7 @@ pub fn vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqsub.v2i64" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v2i64")] - fn _vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - } - unsafe { _vqsubq_s64(a, b) } + unsafe { simd_saturating_sub(a, b) } } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u8)"] @@ -41347,15 +41299,7 @@ pub fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v8i8")] - fn _vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - } - unsafe { _vqsub_u8(a, b) } + unsafe { simd_saturating_sub(a, b) } } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u8)"] @@ -41376,15 +41320,7 @@ pub fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v16i8")] - fn _vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - } - unsafe { _vqsubq_u8(a, b) } + unsafe { simd_saturating_sub(a, b) } } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u16)"] @@ -41405,15 +41341,7 @@ pub fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v4i16")] - fn _vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - } - unsafe { _vqsub_u16(a, b) } + unsafe { 
simd_saturating_sub(a, b) } } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u16)"] @@ -41434,15 +41362,7 @@ pub fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v8i16")] - fn _vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - } - unsafe { _vqsubq_u16(a, b) } + unsafe { simd_saturating_sub(a, b) } } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u32)"] @@ -41463,15 +41383,7 @@ pub fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v2i32")] - fn _vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - } - unsafe { _vqsub_u32(a, b) } + unsafe { simd_saturating_sub(a, b) } } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u32)"] @@ -41492,15 +41404,7 @@ pub fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v4i32")] - fn _vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - } - unsafe { _vqsubq_u32(a, b) } + unsafe { simd_saturating_sub(a, b) } } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u64)"] @@ -41521,15 +41425,7 @@ pub fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v1i64" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v1i64")] - fn _vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t; - } - unsafe { _vqsub_u64(a, b) } + unsafe { simd_saturating_sub(a, b) } } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u64)"] @@ -41550,15 +41446,7 @@ pub fn vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqsub.v2i64" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v2i64")] - fn _vqsubq_u64(a: 
uint64x2_t, b: uint64x2_t) -> uint64x2_t; - } - unsafe { _vqsubq_u64(a, b) } + unsafe { simd_saturating_sub(a, b) } } #[doc = "Rounding Add returning High Narrow (high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s16)"] @@ -41943,6 +41831,7 @@ pub fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vrecpe_f16(a: float16x4_t) -> float16x4_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4f16")] @@ -41965,6 +41854,7 @@ pub fn vrecpe_f16(a: float16x4_t) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vrecpeq_f16(a: float16x8_t) -> float16x8_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v8f16")] @@ -42103,6 +41993,7 @@ pub fn vrecpeq_u32(a: uint32x4_t) -> uint32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vrecps_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v4f16")] @@ -42125,6 +42016,7 @@ pub fn vrecps_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vrecpsq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v8f16")] @@ -42197,7 +42089,6 @@ pub fn vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f16)"] #[inline] -#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42206,13 +42097,13 @@ pub fn vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpret_f32_f16(a: float16x4_t) -> float32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f16)"] #[inline] -#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42221,17 +42112,13 @@ pub fn vreinterpret_f32_f16(a: float16x4_t) -> float32x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f32_f16(a: float16x4_t) -> float32x2_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +#[cfg(not(target_arch = "arm64ec"))] +pub fn 
vreinterpret_s8_f16(a: float16x4_t) -> int8x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f16)"] #[inline] -#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42240,13 +42127,13 @@ pub fn vreinterpret_f32_f16(a: float16x4_t) -> float32x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_s8_f16(a: float16x4_t) -> int8x8_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_s16_f16(a: float16x4_t) -> int16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f16)"] #[inline] -#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42255,17 +42142,13 @@ pub fn vreinterpret_s8_f16(a: float16x4_t) -> int8x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_s8_f16(a: float16x4_t) -> int8x8_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_s32_f16(a: float16x4_t) -> int32x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f16)"] #[inline] -#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42274,13 +42157,13 @@ pub fn vreinterpret_s8_f16(a: float16x4_t) -> int8x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_s16_f16(a: float16x4_t) -> int16x4_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_s64_f16(a: float16x4_t) -> int64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f16)"] #[inline] -#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42289,17 +42172,13 @@ pub fn vreinterpret_s16_f16(a: float16x4_t) -> int16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_s16_f16(a: float16x4_t) -> int16x4_t { - let a: float16x4_t = unsafe { 
simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_u8_f16(a: float16x4_t) -> uint8x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f16)"] #[inline] -#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42308,13 +42187,13 @@ pub fn vreinterpret_s16_f16(a: float16x4_t) -> int16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_s32_f16(a: float16x4_t) -> int32x2_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_u16_f16(a: float16x4_t) -> uint16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f16)"] #[inline] -#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42323,17 +42202,13 @@ pub fn vreinterpret_s32_f16(a: float16x4_t) -> int32x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_s32_f16(a: float16x4_t) -> int32x2_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_u32_f16(a: float16x4_t) -> uint32x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f16)"] #[inline] -#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42342,13 +42217,13 @@ pub fn vreinterpret_s32_f16(a: float16x4_t) -> int32x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_s64_f16(a: float16x4_t) -> int64x1_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_u64_f16(a: float16x4_t) -> uint64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f16)"] #[inline] -#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42357,14 +42232,13 @@ pub fn vreinterpret_s64_f16(a: float16x4_t) -> int64x1_t { )] 
#[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_s64_f16(a: float16x4_t) -> int64x1_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_p8_f16(a: float16x4_t) -> poly8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f16)"] #[inline] -#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42373,13 +42247,13 @@ pub fn vreinterpret_s64_f16(a: float16x4_t) -> int64x1_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_u8_f16(a: float16x4_t) -> uint8x8_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_p16_f16(a: float16x4_t) -> poly16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f16)"] #[inline] -#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42388,17 +42262,13 @@ pub fn vreinterpret_u8_f16(a: float16x4_t) -> uint8x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_u8_f16(a: float16x4_t) -> uint8x8_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_f32_f16(a: float16x8_t) -> float32x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f16)"] #[inline] -#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42407,13 +42277,13 @@ pub fn vreinterpret_u8_f16(a: float16x4_t) -> uint8x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_u16_f16(a: float16x4_t) -> uint16x4_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_s8_f16(a: float16x8_t) -> int8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f16)"] #[inline] -#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( 
@@ -42422,17 +42292,13 @@ pub fn vreinterpret_u16_f16(a: float16x4_t) -> uint16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_u16_f16(a: float16x4_t) -> uint16x4_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_s16_f16(a: float16x8_t) -> int16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f16)"] #[inline] -#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42441,13 +42307,13 @@ pub fn vreinterpret_u16_f16(a: float16x4_t) -> uint16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_u32_f16(a: float16x4_t) -> uint32x2_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_s32_f16(a: float16x8_t) -> int32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f16)"] #[inline] -#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42456,17 +42322,13 @@ pub fn vreinterpret_u32_f16(a: float16x4_t) -> uint32x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_u32_f16(a: float16x4_t) -> uint32x2_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_s64_f16(a: float16x8_t) -> int64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f16)"] #[inline] -#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42475,13 +42337,13 @@ pub fn vreinterpret_u32_f16(a: float16x4_t) -> uint32x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_u64_f16(a: float16x4_t) -> uint64x1_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_u8_f16(a: float16x8_t) -> uint8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f16)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f16)"] #[inline] -#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42490,14 +42352,13 @@ pub fn vreinterpret_u64_f16(a: float16x4_t) -> uint64x1_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_u64_f16(a: float16x4_t) -> uint64x1_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_u16_f16(a: float16x8_t) -> uint16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f16)"] #[inline] -#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42506,13 +42367,13 @@ pub fn vreinterpret_u64_f16(a: float16x4_t) -> uint64x1_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_p8_f16(a: float16x4_t) -> poly8x8_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_u32_f16(a: float16x8_t) -> uint32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f16)"] #[inline] -#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42521,17 +42382,13 @@ pub fn vreinterpret_p8_f16(a: float16x4_t) -> poly8x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_p8_f16(a: float16x4_t) -> poly8x8_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_u64_f16(a: float16x8_t) -> uint64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f16)"] #[inline] -#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42540,13 +42397,13 @@ pub fn vreinterpret_p8_f16(a: float16x4_t) -> poly8x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_p16_f16(a: float16x4_t) -> poly16x4_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_p8_f16(a: float16x8_t) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f16)"] #[inline] -#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42555,17 +42412,13 @@ pub fn vreinterpret_p16_f16(a: float16x4_t) -> poly16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_p16_f16(a: float16x4_t) -> poly16x4_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_p16_f16(a: float16x8_t) -> poly16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f32)"] #[inline] -#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42574,13 +42427,13 @@ pub fn vreinterpret_p16_f16(a: float16x4_t) -> poly16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f32_f16(a: float16x8_t) -> float32x4_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_f16_f32(a: float32x2_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f32)"] #[inline] -#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42589,17 +42442,13 @@ pub fn vreinterpretq_f32_f16(a: float16x8_t) -> float32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f32_f16(a: float16x8_t) -> float32x4_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_f16_f32(a: float32x4_t) -> float16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s8)"] #[inline] -#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42608,13 +42457,13 @@ pub fn vreinterpretq_f32_f16(a: float16x8_t) -> float32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn 
vreinterpretq_s8_f16(a: float16x8_t) -> int8x16_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_f16_s8(a: int8x8_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s8)"] #[inline] -#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42623,21 +42472,13 @@ pub fn vreinterpretq_s8_f16(a: float16x8_t) -> int8x16_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_s8_f16(a: float16x8_t) -> int8x16_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_f16_s8(a: int8x16_t) -> float16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s16)"] #[inline] -#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42646,13 +42487,13 @@ pub fn vreinterpretq_s8_f16(a: float16x8_t) -> int8x16_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_s16_f16(a: float16x8_t) -> int16x8_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_f16_s16(a: int16x4_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s16)"] #[inline] -#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42661,17 +42502,13 @@ pub fn vreinterpretq_s16_f16(a: float16x8_t) -> int16x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_s16_f16(a: float16x8_t) -> int16x8_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_f16_s16(a: int16x8_t) -> float16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s32)"] #[inline] -#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42680,13 +42517,13 @@ pub fn vreinterpretq_s16_f16(a: float16x8_t) -> int16x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_s32_f16(a: float16x8_t) -> int32x4_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_f16_s32(a: int32x2_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s32)"] #[inline] -#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42695,17 +42532,13 @@ pub fn vreinterpretq_s32_f16(a: float16x8_t) -> int32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_s32_f16(a: float16x8_t) -> int32x4_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_f16_s32(a: int32x4_t) -> float16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s64)"] #[inline] -#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42714,13 +42547,13 @@ pub fn vreinterpretq_s32_f16(a: float16x8_t) -> int32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_s64_f16(a: float16x8_t) -> int64x2_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_f16_s64(a: int64x1_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s64)"] #[inline] -#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42729,17 +42562,13 @@ pub fn vreinterpretq_s64_f16(a: float16x8_t) -> int64x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_s64_f16(a: float16x8_t) -> int64x2_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_f16_s64(a: int64x2_t) -> float16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f16)"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u8)"] #[inline] -#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42748,13 +42577,13 @@ pub fn vreinterpretq_s64_f16(a: float16x8_t) -> int64x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_u8_f16(a: float16x8_t) -> uint8x16_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_f16_u8(a: uint8x8_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u8)"] #[inline] -#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42763,21 +42592,13 @@ pub fn vreinterpretq_u8_f16(a: float16x8_t) -> uint8x16_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_u8_f16(a: float16x8_t) -> uint8x16_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_f16_u8(a: uint8x16_t) -> float16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u16)"] #[inline] -#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42786,13 +42607,13 @@ pub fn vreinterpretq_u8_f16(a: float16x8_t) -> uint8x16_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_u16_f16(a: float16x8_t) -> uint16x8_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_f16_u16(a: uint16x4_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u16)"] #[inline] -#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42801,17 +42622,13 @@ pub fn vreinterpretq_u16_f16(a: float16x8_t) -> uint16x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_u16_f16(a: float16x8_t) -> uint16x8_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } 
+#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_f16_u16(a: uint16x8_t) -> float16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u32)"] #[inline] -#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42820,13 +42637,13 @@ pub fn vreinterpretq_u16_f16(a: float16x8_t) -> uint16x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_u32_f16(a: float16x8_t) -> uint32x4_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_f16_u32(a: uint32x2_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u32)"] #[inline] -#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42835,17 +42652,13 @@ pub fn vreinterpretq_u32_f16(a: float16x8_t) -> uint32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_u32_f16(a: float16x8_t) -> uint32x4_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_f16_u32(a: uint32x4_t) -> float16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u64)"] #[inline] -#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42854,13 +42667,13 @@ pub fn vreinterpretq_u32_f16(a: float16x8_t) -> uint32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_u64_f16(a: float16x8_t) -> uint64x2_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_f16_u64(a: uint64x1_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u64)"] #[inline] -#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42869,17 +42682,13 @@ pub fn vreinterpretq_u64_f16(a: float16x8_t) -> uint64x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn 
vreinterpretq_u64_f16(a: float16x8_t) -> uint64x2_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_f16_u64(a: uint64x2_t) -> float16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p8)"] #[inline] -#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42888,13 +42697,13 @@ pub fn vreinterpretq_u64_f16(a: float16x8_t) -> uint64x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_p8_f16(a: float16x8_t) -> poly8x16_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_f16_p8(a: poly8x8_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p8)"] #[inline] -#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42903,21 +42712,13 @@ pub fn vreinterpretq_p8_f16(a: float16x8_t) -> poly8x16_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_p8_f16(a: float16x8_t) -> poly8x16_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_f16_p8(a: poly8x16_t) -> float16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p16)"] #[inline] -#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42926,13 +42727,13 @@ pub fn vreinterpretq_p8_f16(a: float16x8_t) -> poly8x16_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_p16_f16(a: float16x8_t) -> poly16x8_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_f16_p16(a: poly16x4_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p16)"] #[inline] -#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( @@ -42941,18 +42742,14 @@ pub fn vreinterpretq_p16_f16(a: float16x8_t) -> poly16x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_p16_f16(a: float16x8_t) -> poly16x8_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_f16_p16(a: poly16x8_t) -> float16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p128)"] #[inline] -#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -42960,14 +42757,14 @@ pub fn vreinterpretq_p16_f16(a: float16x8_t) -> poly16x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_f32(a: float32x2_t) -> float16x4_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_f16_p128(a: p128) -> float16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f16)"] #[inline] -#[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -42975,18 +42772,14 @@ pub fn vreinterpret_f16_f32(a: float32x2_t) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_f32(a: float32x2_t) -> float16x4_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: float16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_p64_f16(a: float16x4_t) -> poly64x1_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f16)"] #[inline] -#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -42994,14 +42787,14 @@ pub fn vreinterpret_f16_f32(a: float32x2_t) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = 
"stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_f32(a: float32x4_t) -> float16x8_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_p128_f16(a: float16x8_t) -> p128 { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f16)"] #[inline] -#[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -43009,7338 +42802,43 @@ pub fn vreinterpretq_f16_f32(a: float32x4_t) -> float16x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_f32(a: float32x4_t) -> float16x8_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: float16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s8)"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_s8(a: int8x8_t) -> float16x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s8)"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_s8(a: int8x8_t) -> float16x4_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: float16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s8)"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_s8(a: int8x16_t) -> float16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s8)"] -#[inline] -#[cfg(target_endian = 
"big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_s8(a: int8x16_t) -> float16x8_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: float16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s16)"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_s16(a: int16x4_t) -> float16x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s16)"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_s16(a: int16x4_t) -> float16x4_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: float16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s16)"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_s16(a: int16x8_t) -> float16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s16)"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_s16(a: int16x8_t) -> float16x8_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: float16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast 
operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s32)"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_s32(a: int32x2_t) -> float16x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s32)"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_s32(a: int32x2_t) -> float16x4_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: float16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s32)"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_s32(a: int32x4_t) -> float16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s32)"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_s32(a: int32x4_t) -> float16x8_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: float16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s64)"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_s64(a: int64x1_t) -> float16x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s64)"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_s64(a: int64x1_t) -> float16x4_t { - unsafe { - let ret_val: float16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s64)"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_s64(a: int64x2_t) -> float16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s64)"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_s64(a: int64x2_t) -> float16x8_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: float16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u8)"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_u8(a: uint8x8_t) -> float16x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u8)"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_u8(a: uint8x8_t) -> float16x4_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: float16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast 
operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u8)"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_u8(a: uint8x16_t) -> float16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u8)"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_u8(a: uint8x16_t) -> float16x8_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: float16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u16)"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_u16(a: uint16x4_t) -> float16x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u16)"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_u16(a: uint16x4_t) -> float16x4_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: float16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u16)"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_u16(a: uint16x8_t) -> float16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast 
operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u16)"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_u16(a: uint16x8_t) -> float16x8_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: float16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u32)"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_u32(a: uint32x2_t) -> float16x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u32)"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_u32(a: uint32x2_t) -> float16x4_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: float16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u32)"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_u32(a: uint32x4_t) -> float16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u32)"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_u32(a: uint32x4_t) -> float16x8_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: 
float16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u64)"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_u64(a: uint64x1_t) -> float16x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u64)"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_u64(a: uint64x1_t) -> float16x4_t { - unsafe { - let ret_val: float16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u64)"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_u64(a: uint64x2_t) -> float16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u64)"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_u64(a: uint64x2_t) -> float16x8_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: float16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p8)"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_p8(a: poly8x8_t) -> float16x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector 
reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p8)"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_p8(a: poly8x8_t) -> float16x4_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: float16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p8)"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_p8(a: poly8x16_t) -> float16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p8)"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_p8(a: poly8x16_t) -> float16x8_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: float16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p16)"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_p16(a: poly16x4_t) -> float16x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p16)"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_p16(a: poly16x4_t) -> float16x4_t { - let a: poly16x4_t = unsafe { 
simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: float16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p16)"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_p16(a: poly16x8_t) -> float16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p16)"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_p16(a: poly16x8_t) -> float16x8_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: float16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p128)"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_p128(a: p128) -> float16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p128)"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_p128(a: p128) -> float16x8_t { - unsafe { - let ret_val: float16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f16)"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_p64_f16(a: 
float16x4_t) -> poly64x1_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f16)"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_p64_f16(a: float16x4_t) -> poly64x1_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f16)"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_p128_f16(a: float16x8_t) -> p128 { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f16)"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_p128_f16(a: float16x8_t) -> p128 { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f16)"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_p64_f16(a: float16x8_t) -> poly64x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f16)"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_p64_f16(a: float16x8_t) -> poly64x2_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } 
-} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p64)"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_p64(a: poly64x1_t) -> float16x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p64)"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpret_f16_p64(a: poly64x1_t) -> float16x4_t { - unsafe { - let ret_val: float16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p64)"] -#[inline] -#[cfg(target_endian = "little")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_p64(a: poly64x2_t) -> float16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p64)"] -#[inline] -#[cfg(target_endian = "big")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -pub fn vreinterpretq_f16_p64(a: poly64x2_t) -> float16x8_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: float16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p128)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_f32_p128(a: p128) -> float32x4_t { 
- unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p128)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_f32_p128(a: p128) -> float32x4_t { - unsafe { - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f32)"] -#[inline] -#[cfg(target_endian = 
"big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, 
any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature 
= "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_p16_f32(a: float32x2_t) -> 
poly16x4_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 
5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s8)"] 
-#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) 
-)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - 
target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t { - unsafe { transmute(a) } -} -#[doc = 
"Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s16)"] -#[inline] 
-#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] 
-#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = 
"arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn 
vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 
3, 2, 1, 0]) }; - unsafe { - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int64x2_t = 
transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 
4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s32)"] -#[inline] -#[cfg(target_endian = 
"little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", 
target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = 
"1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector 
reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s32)"] -#[inline] 
-#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = 
"neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s32)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s64)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = 
"aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { - unsafe { - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s64)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { - unsafe { - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s64)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") 
-)] -pub fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { - unsafe { - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s64)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { - unsafe { - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s64)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t { - unsafe { - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s64)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { - unsafe { - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s64)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { - unsafe { - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s64)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u64_s64(a: int64x1_t) -> uint64x1_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s64)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { - unsafe { - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s64)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub 
fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { - unsafe { - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s64)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s64)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - 
ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s64)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s64)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s64)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s64)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s64)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s64)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s64)"] -#[inline] -#[cfg(target_endian = 
"little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s64)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = 
"arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - 
all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = 
"arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn 
vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u8)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast 
operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u16)"] -#[inline] -#[cfg(target_endian 
= "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, 
any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = 
"arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u16)"] -#[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_p64_f16(a: float16x8_t) -> poly64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p64)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_f16_p64(a: poly64x1_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] 
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p64)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(nop) )] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_f16_p64(a: poly64x2_t) -> float16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p128)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -50356,39 +42854,12 @@ pub fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn 
vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { +pub fn vreinterpretq_f32_p128(a: p128) -> float32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -50404,39 +42875,12 @@ pub fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { +pub fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -50452,39 +42896,12 @@ pub fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn 
vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { +pub fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -50500,43 +42917,12 @@ pub fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { +pub fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -50552,39 +42938,12 @@ pub fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = 
"111800") )] -pub fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { +pub fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -50600,39 +42959,12 @@ pub fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { +pub fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -50648,43 +42980,12 @@ pub fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn 
vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { +pub fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -50700,13 +43001,12 @@ pub fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { +pub fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -50722,17 +43022,12 @@ pub fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -50748,13 +43043,12 @@ pub fn vreinterpretq_p16_u16(a: uint16x8_t) -> 
poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { +pub fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -50770,17 +43064,12 @@ pub fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -50796,13 +43085,12 @@ pub fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { +pub fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -50818,17 +43106,12 @@ pub fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ 
-50844,13 +43127,12 @@ pub fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { +pub fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -50866,17 +43148,12 @@ pub fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -50892,13 +43169,12 @@ pub fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { +pub fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -50914,17 +43190,12 @@ pub fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -50940,13 +43211,12 @@ pub fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { +pub fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -50962,14 +43232,12 @@ pub fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -50985,13 +43253,12 @@ pub fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { +pub fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51007,17 +43274,12 @@ pub fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch 
= "arm"), assert_instr(nop))] @@ -51033,13 +43295,12 @@ pub fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { +pub fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51055,17 +43316,12 @@ pub fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51081,13 +43337,12 @@ pub fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { +pub fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51103,14 +43358,12 @@ pub fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51126,13 +43379,12 @@ 
pub fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { +pub fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51148,17 +43400,12 @@ pub fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51174,13 +43421,12 @@ pub fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { +pub fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51196,17 +43442,12 @@ pub fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(nop))] @@ -51222,13 +43463,12 @@ pub fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { +pub fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51244,17 +43484,12 @@ pub fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51270,13 +43505,12 @@ pub fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { +pub fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51292,21 +43526,12 @@ pub fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s8)"] #[inline] -#[cfg(target_endian = "little")] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51322,13 +43547,12 @@ pub fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { +pub fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51344,17 +43568,12 @@ pub fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51370,13 +43589,12 @@ pub fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { +pub fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51392,17 +43610,12 @@ pub fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u32)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51418,13 +43631,12 @@ pub fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { +pub fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51440,17 +43652,12 @@ pub fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51466,13 +43673,12 @@ pub fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { +pub fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51488,21 +43694,12 @@ pub fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51518,13 +43715,12 @@ pub fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { +pub fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51540,17 +43736,12 @@ pub fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51566,13 +43757,12 @@ pub fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { +pub fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51588,17 +43778,12 @@ pub fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { + 
unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51614,13 +43799,12 @@ pub fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { +pub fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51636,21 +43820,12 @@ pub fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51666,13 +43841,12 @@ pub fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { +pub fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51688,17 +43862,12 @@ pub fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: poly16x8_t = 
transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51714,13 +43883,12 @@ pub fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { +pub fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51736,16 +43904,12 @@ pub fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { - unsafe { - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51761,13 +43925,12 @@ pub fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { +pub fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51783,16 +43946,12 @@ pub fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { - unsafe { - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 
2, 1, 0]) - } +pub fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51808,13 +43967,12 @@ pub fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { +pub fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51830,16 +43988,12 @@ pub fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { - unsafe { - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51855,13 +44009,12 @@ pub fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { +pub fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51877,14 +44030,11 @@ pub fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { - unsafe { - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { + 
unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51901,13 +44051,12 @@ pub fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_u64(a: uint64x1_t) -> int64x1_t { +pub fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51923,13 +44072,12 @@ pub fn vreinterpret_s64_u64(a: uint64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { +pub fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51945,16 +44093,12 @@ pub fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { - unsafe { - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51970,13 +44114,12 @@ pub fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { +pub fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u64)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -51992,16 +44135,12 @@ pub fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { - unsafe { - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52017,13 +44156,12 @@ pub fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { +pub fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52039,16 +44177,12 @@ pub fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { - unsafe { - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52064,13 +44198,12 @@ pub fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { +pub fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u64)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52086,16 +44219,12 @@ pub fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { - unsafe { - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52111,13 +44240,12 @@ pub fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { +pub fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52133,16 +44261,12 @@ pub fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { - unsafe { - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52158,13 +44282,12 @@ pub fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { +pub fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u64)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52180,17 +44303,12 @@ pub fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52206,13 +44324,12 @@ pub fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { +pub fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52228,21 +44345,12 @@ pub fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52258,13 +44366,12 @@ pub fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { +pub fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52280,17 +44387,12 @@ pub fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52306,13 +44408,12 @@ pub fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { +pub fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52328,17 +44429,12 @@ pub fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52354,13 +44450,12 @@ pub fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { +pub fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { unsafe { 
transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52376,17 +44471,12 @@ pub fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52402,13 +44492,12 @@ pub fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { +pub fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52424,21 +44513,12 @@ pub fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52454,13 +44534,12 @@ pub fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn 
vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { +pub fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52476,17 +44555,12 @@ pub fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52502,13 +44576,12 @@ pub fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { +pub fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52524,17 +44597,12 @@ pub fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52550,13 +44618,12 @@ pub fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { target_arch = 
"arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { +pub fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52572,21 +44639,12 @@ pub fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52602,13 +44660,12 @@ pub fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { +pub fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52624,17 +44681,12 @@ pub fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(nop))] @@ -52650,13 +44702,12 @@ pub fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { +pub fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52672,17 +44723,12 @@ pub fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_u64_s64(a: int64x1_t) -> uint64x1_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52698,13 +44744,12 @@ pub fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { +pub fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52720,17 +44765,12 @@ pub fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = 
"arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52746,13 +44786,12 @@ pub fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { +pub fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52768,17 +44807,12 @@ pub fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52794,13 +44828,12 @@ pub fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { +pub fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52816,17 +44849,12 @@ pub fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s64)"] #[inline] -#[cfg(target_endian = "little")] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52842,13 +44870,12 @@ pub fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { +pub fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52864,14 +44891,12 @@ pub fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52887,13 +44912,12 @@ pub fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { +pub fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52909,17 +44933,12 @@ pub fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = 
"neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52935,13 +44954,12 @@ pub fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { +pub fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52957,17 +44975,12 @@ pub fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -52983,13 +44996,12 @@ pub fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { +pub fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53005,17 +45017,12 @@ pub fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u8)"] 
#[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53031,13 +45038,12 @@ pub fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { +pub fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53053,14 +45059,12 @@ pub fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53076,13 +45080,12 @@ pub fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { +pub fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53098,17 +45101,12 @@ pub fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u8)"] #[inline] -#[cfg(target_endian = 
"little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53124,13 +45122,12 @@ pub fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { +pub fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53146,18 +45143,12 @@ pub fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53173,13 +45164,12 @@ pub fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { +pub fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53195,22 +45185,12 @@ pub fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53226,13 +45206,12 @@ pub fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { +pub fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53248,18 +45227,12 @@ pub fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53275,13 +45248,12 @@ pub fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { +pub fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53297,18 +45269,12 @@ pub fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 
2, 1, 0]) - } +pub fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53324,13 +45290,12 @@ pub fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { +pub fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53346,18 +45311,12 @@ pub fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53373,13 +45332,12 @@ pub fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { +pub fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53395,22 +45353,12 @@ pub fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, 
[15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53426,13 +45374,12 @@ pub fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { +pub fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53448,18 +45395,12 @@ pub fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53475,13 +45416,12 @@ pub fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { +pub fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53497,18 +45437,12 @@ pub fn vreinterpretq_u32_p8(a: 
poly8x16_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53524,13 +45458,12 @@ pub fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { +pub fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53546,18 +45479,12 @@ pub fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53573,13 +45500,12 @@ pub fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { +pub fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53595,18 +45521,12 @@ pub fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53622,13 +45542,12 @@ pub fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { +pub fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53644,17 +45563,12 @@ pub fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53670,13 +45584,12 @@ pub fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { +pub fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p16)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53692,17 +45605,12 @@ pub fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53718,13 +45626,12 @@ pub fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { +pub fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53740,17 +45647,12 @@ pub fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53766,13 +45668,12 @@ pub fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { +pub fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53788,17 +45689,12 @@ pub fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53814,13 +45710,12 @@ pub fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { +pub fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53836,14 +45731,12 @@ pub fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53859,13 +45752,12 @@ pub fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { +pub fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53881,17 +45773,12 @@ pub fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53907,13 +45794,12 @@ pub fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { +pub fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53929,17 +45815,12 @@ pub fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53955,13 +45836,12 @@ pub fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { +pub fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { 
unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -53977,17 +45857,12 @@ pub fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -54003,13 +45878,12 @@ pub fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { +pub fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -54025,14 +45899,12 @@ pub fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -54048,13 +45920,12 @@ pub fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { +pub fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { unsafe { transmute(a) } } #[doc = "Vector 
reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -54070,17 +45941,12 @@ pub fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -54096,13 +45962,12 @@ pub fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { +pub fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -54118,17 +45983,12 @@ pub fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -54144,13 +46004,12 @@ pub fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { 
+pub fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -54166,21 +46025,12 @@ pub fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -54196,13 +46046,12 @@ pub fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { +pub fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -54218,17 +46067,12 @@ pub fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -54244,13 +46088,12 @@ pub fn vreinterpretq_s16_p16(a: poly16x8_t) -> 
int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { +pub fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -54266,17 +46109,12 @@ pub fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -54292,13 +46130,12 @@ pub fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { +pub fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -54314,17 +46151,12 @@ pub fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(nop))] @@ -54340,13 +46172,12 @@ pub fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { +pub fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -54362,21 +46193,12 @@ pub fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -54392,13 +46214,12 @@ pub fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { +pub fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -54414,17 +46235,12 @@ pub fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u64)"] 
#[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -54440,13 +46256,12 @@ pub fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { +pub fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -54462,17 +46277,12 @@ pub fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -54488,13 +46298,12 @@ pub fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { +pub fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -54510,17 +46319,12 @@ pub fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p16)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -54536,13 +46340,12 @@ pub fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { +pub fn vreinterpret_s64_u64(a: uint64x1_t) -> int64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -54558,23 +46361,14 @@ pub fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u64)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -54588,15 +46382,14 @@ pub fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { +pub fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u64)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -54610,22 +46403,14 @@ pub fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn 
vreinterpretq_s8_p128(a: p128) -> int8x16_t { - unsafe { - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u64)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -54639,15 +46424,14 @@ pub fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { +pub fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u64)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -54661,18 +46445,14 @@ pub fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { - unsafe { - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u64)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -54686,15 +46466,14 @@ pub fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { +pub fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u64)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -54708,18 +46487,14 @@ pub fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { - unsafe { - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u64)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -54733,15 +46508,14 @@ pub fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { +pub fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u64)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -54755,18 +46529,14 @@ pub fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { - unsafe { - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u64)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", 
target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -54780,15 +46550,14 @@ pub fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { +pub fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u64)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -54802,22 +46571,14 @@ pub fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { - unsafe { - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u64)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -54831,15 +46592,14 @@ pub fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { +pub fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u64)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -54853,18 +46613,14 @@ pub fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { - unsafe { - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u64)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -54878,15 +46634,14 @@ pub fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { +pub fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u64)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -54900,18 +46655,14 @@ pub fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { - unsafe { - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p8)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -54925,15 +46676,14 @@ pub fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { +pub fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p8)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -54947,18 +46697,14 @@ pub fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { - unsafe { - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p8)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -54972,15 +46718,14 @@ pub fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { +pub fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p8)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -54994,22 +46739,14 @@ pub fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { - unsafe { - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p8)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] 
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55023,15 +46760,14 @@ pub fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { +pub fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p8)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55045,18 +46781,14 @@ pub fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { - unsafe { - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p8)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55070,15 +46802,14 @@ pub fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { +pub fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p8)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55092,18 +46823,14 @@ pub fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { - unsafe { - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p8)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55117,15 +46844,14 @@ pub fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { +pub fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p8)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55139,16 +46865,14 @@ pub fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p8)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55162,15 +46886,14 @@ pub fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { +pub fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s8)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p8)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55184,17 +46907,14 @@ pub fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p8)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55208,15 +46928,14 @@ pub fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { +pub fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p8)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55230,20 +46949,14 @@ pub fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p8)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", 
target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55257,15 +46970,14 @@ pub fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { +pub fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p8)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55279,16 +46991,14 @@ pub fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p8)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55302,15 +47012,14 @@ pub fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { +pub fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p8)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55324,16 +47033,14 @@ pub fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_s16(a: 
int16x8_t) -> p128 { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p8)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55347,15 +47054,14 @@ pub fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { +pub fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p8)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55369,19 +47075,14 @@ pub fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p16)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55395,15 +47096,14 @@ pub fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { +pub fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s32)"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p16)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55417,16 +47117,14 @@ pub fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p16)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55440,15 +47138,14 @@ pub fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { +pub fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p16)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55462,16 +47159,14 @@ pub fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p16)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55485,15 +47180,14 @@ pub fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { +pub fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p16)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55507,19 +47201,14 @@ pub fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p16)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55533,15 +47222,14 @@ pub fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { +pub fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p16)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55555,16 +47243,14 @@ pub fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 
0]) }; +pub fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p16)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55578,15 +47264,14 @@ pub fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { +pub fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p16)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55600,16 +47285,14 @@ pub fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p16)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55623,15 +47306,14 @@ pub fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { +pub fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p16)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] 
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55645,17 +47327,14 @@ pub fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p16)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55669,15 +47348,14 @@ pub fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { +pub fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p16)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55691,20 +47369,14 @@ pub fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p16)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55718,15 +47390,14 @@ pub fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { +pub fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p16)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55740,16 +47411,14 @@ pub fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p16)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55763,15 +47432,14 @@ pub fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { +pub fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p16)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55785,16 +47453,14 @@ pub fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { 
unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p16)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55808,15 +47474,14 @@ pub fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { +pub fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p16)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -55830,17 +47495,12 @@ pub fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p128)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -55856,13 +47516,12 @@ pub fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { +pub fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p128)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ 
-55878,14 +47537,12 @@ pub fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p128)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -55901,13 +47558,12 @@ pub fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { +pub fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p128)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -55923,14 +47579,12 @@ pub fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p128)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -55946,13 +47600,12 @@ pub fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { +pub fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p128)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -55968,17 +47621,12 @@ pub fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p128)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -55994,13 +47642,12 @@ pub fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { +pub fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p128)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56016,14 +47663,12 @@ pub fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p128)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56039,13 +47684,12 @@ pub fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { +pub fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p128)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56061,14 +47705,12 @@ pub fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] 
-pub fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p128)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56084,13 +47726,12 @@ pub fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { +pub fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56106,15 +47747,12 @@ pub fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56130,13 +47768,12 @@ pub fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { +pub fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56152,18 +47789,12 @@ pub fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { - let a: poly8x16_t = - unsafe { 
simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56179,13 +47810,12 @@ pub fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { +pub fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56201,14 +47831,12 @@ pub fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; +pub fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56224,13 +47852,12 @@ pub fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { +pub fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56246,14 +47873,12 @@ pub fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { - let a: poly16x8_t 
= unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; +pub fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56269,13 +47894,12 @@ pub fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { +pub fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56291,17 +47915,12 @@ pub fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56317,13 +47936,12 @@ pub fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { +pub fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56339,16 +47957,12 @@ pub fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { - unsafe { - let ret_val: 
int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56364,13 +47978,12 @@ pub fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { +pub fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56386,16 +47999,12 @@ pub fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { - unsafe { - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56411,13 +48020,12 @@ pub fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { +pub fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56433,16 +48041,12 @@ pub fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { - unsafe { - let ret_val: int32x2_t = transmute(a); - 
simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56458,13 +48062,12 @@ pub fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { +pub fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56480,16 +48083,12 @@ pub fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { - unsafe { - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56505,13 +48104,12 @@ pub fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { +pub fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56527,16 +48125,12 @@ pub fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { - unsafe { - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } 
+pub fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56552,13 +48146,12 @@ pub fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { +pub fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56574,16 +48167,12 @@ pub fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { - unsafe { - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56599,13 +48188,12 @@ pub fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { +pub fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56621,16 +48209,12 @@ pub fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { - unsafe { - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_p64_p8(a: poly8x16_t) -> 
poly64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56646,13 +48230,12 @@ pub fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { +pub fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56668,16 +48251,12 @@ pub fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { - unsafe { - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56693,13 +48272,12 @@ pub fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { +pub fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56715,14 +48293,12 @@ pub fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; +pub fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56738,13 +48314,12 @@ pub fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { +pub fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56760,21 +48335,12 @@ pub fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56790,13 +48356,12 @@ pub fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { +pub fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56812,17 +48377,12 @@ pub fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn 
vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56838,13 +48398,12 @@ pub fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { +pub fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56860,17 +48419,12 @@ pub fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56886,13 +48440,12 @@ pub fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { +pub fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56908,21 +48461,12 @@ pub fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: 
uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56938,13 +48482,12 @@ pub fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { +pub fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56960,17 +48503,12 @@ pub fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -56986,13 +48524,12 @@ pub fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { +pub fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -57008,17 +48545,12 @@ pub fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = 
"111800") )] -pub fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -57034,13 +48566,12 @@ pub fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { +pub fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -57056,21 +48587,12 @@ pub fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -57086,13 +48608,12 @@ pub fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { +pub fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -57109,11 +48630,7 @@ pub fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = 
"111800") )] pub fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } + unsafe { transmute(a) } } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev16_p8)"] @@ -57882,6 +49399,7 @@ pub fn vrev64q_u8(a: uint8x16_t) -> uint8x16_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vrev64_f16(a: float16x4_t) -> float16x4_t { unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) } } @@ -57896,6 +49414,7 @@ pub fn vrev64_f16(a: float16x4_t) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vrev64q_f16(a: float16x8_t) -> float16x8_t { unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } } @@ -58258,13 +49777,13 @@ pub fn vrhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vrndn_f16(a: float16x4_t) -> float16x4_t { unsafe extern "unadjusted" { #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), + any(target_arch = "aarch64", target_arch = "arm64ec", target_arch = "arm"), link_name = "llvm.roundeven.v4f16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrintn.v4f16")] fn _vrndn_f16(a: float16x4_t) -> float16x4_t; } unsafe { _vrndn_f16(a) } @@ -58280,13 +49799,13 @@ pub fn vrndn_f16(a: float16x4_t) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vrndnq_f16(a: float16x8_t) -> float16x8_t { unsafe extern "unadjusted" { #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), + any(target_arch = "aarch64", target_arch = "arm64ec", target_arch = "arm"), link_name = "llvm.roundeven.v8f16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrintn.v8f16")] fn _vrndnq_f16(a: float16x8_t) -> float16x8_t; } unsafe { _vrndnq_f16(a) } @@ -58312,10 +49831,9 @@ pub fn vrndnq_f16(a: float16x8_t) -> float16x8_t { pub fn vrndn_f32(a: float32x2_t) -> float32x2_t { unsafe extern "unadjusted" { #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), + any(target_arch = "aarch64", target_arch = "arm64ec", target_arch = "arm"), link_name = "llvm.roundeven.v2f32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrintn.v2f32")] fn _vrndn_f32(a: float32x2_t) -> float32x2_t; } unsafe { _vrndn_f32(a) } @@ -58341,10 +49859,9 @@ pub fn vrndn_f32(a: float32x2_t) -> float32x2_t { pub fn vrndnq_f32(a: float32x4_t) -> float32x4_t { unsafe extern "unadjusted" { #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), + any(target_arch = "aarch64", target_arch = "arm64ec", target_arch = "arm"), link_name = "llvm.roundeven.v4f32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrintn.v4f32")] fn _vrndnq_f32(a: float32x4_t) -> float32x4_t; } unsafe { _vrndnq_f32(a) } @@ -59366,6 +50883,7 @@ pub fn vrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { assert_instr(frsqrte) )] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn 
vrsqrte_f16(a: float16x4_t) -> float16x4_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v4f16")] @@ -59388,6 +50906,7 @@ pub fn vrsqrte_f16(a: float16x4_t) -> float16x4_t { assert_instr(frsqrte) )] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vrsqrteq_f16(a: float16x8_t) -> float16x8_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v8f16")] @@ -59526,6 +51045,7 @@ pub fn vrsqrteq_u32(a: uint32x4_t) -> uint32x4_t { assert_instr(frsqrts) )] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vrsqrts_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrts.v4f16")] @@ -59548,6 +51068,7 @@ pub fn vrsqrts_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { assert_instr(frsqrts) )] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vrsqrtsq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrts.v8f16")] @@ -60231,6 +51752,7 @@ pub fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vset_lane_f16(a: f16, b: float16x4_t) -> float16x4_t { static_assert_uimm_bits!(LANE, 2); unsafe { simd_insert!(b, LANE as u32, a) } @@ -60247,6 +51769,7 @@ pub fn vset_lane_f16(a: f16, b: float16x4_t) -> float16x4_t { #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vsetq_lane_f16(a: f16, b: float16x8_t) -> float16x8_t { static_assert_uimm_bits!(LANE, 3); unsafe { simd_insert!(b, LANE as u32, a) } @@ -63699,6 +55222,7 @@ pub fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] pub unsafe fn vst1_f16(ptr: *mut f16, a: float16x4_t) { vst1_v4f16( @@ -63716,6 +55240,7 @@ pub unsafe fn vst1_f16(ptr: *mut f16, a: float16x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] pub unsafe fn vst1q_f16(ptr: *mut f16, a: float16x8_t) { vst1q_v8f16( @@ -63734,6 +55259,7 @@ pub unsafe fn vst1q_f16(ptr: *mut f16, a: float16x8_t) { #[cfg_attr(test, assert_instr(vst1))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vst1_f16_x2(a: *mut f16, b: float16x4x2_t) { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0.v4f16")] @@ -63751,6 +55277,7 @@ pub unsafe fn vst1_f16_x2(a: *mut f16, b: float16x4x2_t) { #[cfg_attr(test, assert_instr(vst1))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = 
"136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vst1q_f16_x2(a: *mut f16, b: float16x8x2_t) { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0.v8f16")] @@ -63767,6 +55294,7 @@ pub unsafe fn vst1q_f16_x2(a: *mut f16, b: float16x8x2_t) { #[cfg_attr(test, assert_instr(st1))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vst1_f16_x2(a: *mut f16, b: float16x4x2_t) { unsafe extern "unadjusted" { #[cfg_attr( @@ -63786,6 +55314,7 @@ pub unsafe fn vst1_f16_x2(a: *mut f16, b: float16x4x2_t) { #[cfg_attr(test, assert_instr(st1))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vst1q_f16_x2(a: *mut f16, b: float16x8x2_t) { unsafe extern "unadjusted" { #[cfg_attr( @@ -63806,6 +55335,7 @@ pub unsafe fn vst1q_f16_x2(a: *mut f16, b: float16x8x2_t) { #[cfg_attr(test, assert_instr(vst1))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vst1_f16_x3(a: *mut f16, b: float16x4x3_t) { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0.v4f16")] @@ -63823,6 +55353,7 @@ pub unsafe fn vst1_f16_x3(a: *mut f16, b: float16x4x3_t) { #[cfg_attr(test, assert_instr(vst1))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vst1q_f16_x3(a: *mut f16, b: float16x8x3_t) { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0.v8f16")] @@ -63839,6 +55370,7 @@ pub unsafe fn vst1q_f16_x3(a: *mut f16, b: float16x8x3_t) { #[cfg_attr(test, assert_instr(st1))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vst1_f16_x3(a: *mut f16, b: float16x4x3_t) { unsafe extern "unadjusted" { #[cfg_attr( @@ -63858,6 +55390,7 @@ pub unsafe fn vst1_f16_x3(a: *mut f16, b: float16x4x3_t) { #[cfg_attr(test, assert_instr(st1))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vst1q_f16_x3(a: *mut f16, b: float16x8x3_t) { unsafe extern "unadjusted" { #[cfg_attr( @@ -63877,6 +55410,7 @@ pub unsafe fn vst1q_f16_x3(a: *mut f16, b: float16x8x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1_f16_x4(a: *mut f16, b: float16x4x4_t) { unsafe extern "unadjusted" { @@ -63900,6 +55434,7 @@ pub unsafe fn vst1_f16_x4(a: *mut f16, b: float16x4x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(vst1))] pub unsafe fn vst1q_f16_x4(a: *mut f16, b: float16x8x4_t) { unsafe extern "unadjusted" { @@ -63923,6 +55458,7 @@ pub unsafe fn vst1q_f16_x4(a: *mut f16, b: float16x8x4_t) { #[cfg_attr(test, assert_instr(st1))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] 
+#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vst1_f16_x4(a: *mut f16, b: float16x4x4_t) { unsafe extern "unadjusted" { #[cfg_attr( @@ -63948,6 +55484,7 @@ pub unsafe fn vst1_f16_x4(a: *mut f16, b: float16x4x4_t) { #[cfg_attr(test, assert_instr(st1))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vst1q_f16_x4(a: *mut f16, b: float16x8x4_t) { unsafe extern "unadjusted" { #[cfg_attr( @@ -64556,6 +56093,7 @@ pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vst1_lane_f16(a: *mut f16, b: float16x4_t) { static_assert_uimm_bits!(LANE, 2); *a = simd_extract!(b, LANE as u32); @@ -64574,6 +56112,7 @@ pub unsafe fn vst1_lane_f16(a: *mut f16, b: float16x4_t) { #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vst1q_lane_f16(a: *mut f16, b: float16x8_t) { static_assert_uimm_bits!(LANE, 3); *a = simd_extract!(b, LANE as u32); @@ -67138,6 +58677,7 @@ unsafe fn vst1q_v8i16(addr: *const i8, val: int16x8_t, align: i32) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] unsafe fn vst1_v4f16(addr: *const i8, val: float16x4_t, align: i32) { unsafe extern "unadjusted" { @@ -67155,6 +58695,7 @@ unsafe fn vst1_v4f16(addr: *const i8, val: float16x4_t, align: i32) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] unsafe fn vst1q_v8f16(addr: *const i8, val: float16x8_t, align: i32) { unsafe extern "unadjusted" { @@ -67196,6 +58737,7 @@ pub unsafe fn vst1q_lane_p64(a: *mut p64, b: poly64x2_t) { #[cfg(not(target_arch = "arm"))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2_f16(a: *mut f16, b: float16x4x2_t) { unsafe extern "unadjusted" { @@ -67215,6 +58757,7 @@ pub unsafe fn vst2_f16(a: *mut f16, b: float16x4x2_t) { #[cfg(not(target_arch = "arm"))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(st2))] pub unsafe fn vst2q_f16(a: *mut f16, b: float16x8x2_t) { unsafe extern "unadjusted" { @@ -67235,6 +58778,7 @@ pub unsafe fn vst2q_f16(a: *mut f16, b: float16x8x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2_f16(a: *mut f16, b: float16x4x2_t) { unsafe extern "unadjusted" { @@ -67252,6 +58796,7 @@ pub unsafe fn vst2_f16(a: *mut f16, b: float16x4x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[target_feature(enable = "neon,fp16")] 
#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(vst2))] pub unsafe fn vst2q_f16(a: *mut f16, b: float16x8x2_t) { unsafe extern "unadjusted" { @@ -67550,6 +59095,7 @@ pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { #[cfg_attr(test, assert_instr(st2, LANE = 0))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vst2_lane_f16(a: *mut f16, b: float16x4x2_t) { static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { @@ -67571,6 +59117,7 @@ pub unsafe fn vst2_lane_f16(a: *mut f16, b: float16x4x2_t) { #[cfg_attr(test, assert_instr(st2, LANE = 0))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vst2q_lane_f16(a: *mut f16, b: float16x8x2_t) { static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { @@ -67593,6 +59140,7 @@ pub unsafe fn vst2q_lane_f16(a: *mut f16, b: float16x8x2_t) { #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vst2_lane_f16(a: *mut f16, b: float16x4x2_t) { static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { @@ -67612,6 +59160,7 @@ pub unsafe fn vst2_lane_f16(a: *mut f16, b: float16x4x2_t) { #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vst2q_lane_f16(a: *mut f16, b: float16x8x2_t) { static_assert_uimm_bits!(LANE, 1); unsafe extern "unadjusted" { @@ -68413,6 +59962,7 @@ pub unsafe fn vst2q_p16(a: *mut p16, b: poly16x8x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(vst3))] pub unsafe fn vst3_f16(a: *mut f16, b: float16x4x3_t) { unsafe extern "unadjusted" { @@ -68430,6 +59980,7 @@ pub unsafe fn vst3_f16(a: *mut f16, b: float16x4x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(vst3))] pub unsafe fn vst3q_f16(a: *mut f16, b: float16x8x3_t) { unsafe extern "unadjusted" { @@ -68446,6 +59997,7 @@ pub unsafe fn vst3q_f16(a: *mut f16, b: float16x8x3_t) { #[cfg(not(target_arch = "arm"))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3_f16(a: *mut f16, b: float16x4x3_t) { unsafe extern "unadjusted" { @@ -68465,6 +60017,7 @@ pub unsafe fn vst3_f16(a: *mut f16, b: float16x4x3_t) { #[cfg(not(target_arch = "arm"))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(st3))] pub unsafe fn vst3q_f16(a: *mut f16, b: float16x8x3_t) { unsafe extern "unadjusted" { @@ -68767,6 +60320,7 @@ pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] 
+#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vst3_lane_f16(a: *mut f16, b: float16x4x3_t) { static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { @@ -68793,6 +60347,7 @@ pub unsafe fn vst3_lane_f16(a: *mut f16, b: float16x4x3_t) { #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vst3q_lane_f16(a: *mut f16, b: float16x8x3_t) { static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { @@ -68818,6 +60373,7 @@ pub unsafe fn vst3q_lane_f16(a: *mut f16, b: float16x8x3_t) { #[cfg_attr(test, assert_instr(st3, LANE = 0))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vst3_lane_f16(a: *mut f16, b: float16x4x3_t) { static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { @@ -68839,6 +60395,7 @@ pub unsafe fn vst3_lane_f16(a: *mut f16, b: float16x4x3_t) { #[cfg_attr(test, assert_instr(st3, LANE = 0))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vst3q_lane_f16(a: *mut f16, b: float16x8x3_t) { static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { @@ -69685,6 +61242,7 @@ pub unsafe fn vst3q_p16(a: *mut p16, b: poly16x8x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(vst4))] pub unsafe fn vst4_f16(a: *mut f16, b: float16x4x4_t) { unsafe extern "unadjusted" { @@ -69709,6 +61267,7 @@ pub unsafe fn vst4_f16(a: *mut f16, b: float16x4x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(vst4))] pub unsafe fn vst4q_f16(a: *mut f16, b: float16x8x4_t) { unsafe extern "unadjusted" { @@ -69732,6 +61291,7 @@ pub unsafe fn vst4q_f16(a: *mut f16, b: float16x8x4_t) { #[cfg(not(target_arch = "arm"))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4_f16(a: *mut f16, b: float16x4x4_t) { unsafe extern "unadjusted" { @@ -69751,6 +61311,7 @@ pub unsafe fn vst4_f16(a: *mut f16, b: float16x4x4_t) { #[cfg(not(target_arch = "arm"))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(st4))] pub unsafe fn vst4q_f16(a: *mut f16, b: float16x8x4_t) { unsafe extern "unadjusted" { @@ -70102,6 +61663,7 @@ pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vst4_lane_f16(a: *mut f16, b: float16x4x4_t) { static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { @@ -70129,6 +61691,7 @@ pub unsafe fn vst4_lane_f16(a: *mut f16, b: float16x4x4_t) { #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn 
vst4q_lane_f16(a: *mut f16, b: float16x8x4_t) { static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { @@ -70155,6 +61718,7 @@ pub unsafe fn vst4q_lane_f16(a: *mut f16, b: float16x8x4_t) { #[cfg_attr(test, assert_instr(st4, LANE = 0))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vst4_lane_f16(a: *mut f16, b: float16x4x4_t) { static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { @@ -70183,6 +61747,7 @@ pub unsafe fn vst4_lane_f16(a: *mut f16, b: float16x4x4_t) { #[cfg_attr(test, assert_instr(st4, LANE = 0))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vst4q_lane_f16(a: *mut f16, b: float16x8x4_t) { static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { @@ -71124,6 +62689,7 @@ pub unsafe fn vstrq_p128(a: *mut p128, b: p128) { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vsub_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe { simd_sub(a, b) } } @@ -71138,6 +62704,7 @@ pub fn vsub_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vsubq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe { simd_sub(a, b) } } @@ -73002,6 +64569,7 @@ pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vtrn_f16(a: float16x4_t, b: float16x4_t) -> float16x4x2_t { unsafe { let a1: float16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); @@ -73024,6 +64592,7 @@ pub fn vtrn_f16(a: float16x4_t, b: float16x4_t) -> float16x4x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vtrnq_f16(a: float16x8_t, b: float16x8_t) -> float16x8x2_t { unsafe { let a1: float16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); @@ -74134,6 +65703,7 @@ pub fn vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vuzp_f16(a: float16x4_t, b: float16x4_t) -> float16x4x2_t { unsafe { let a0: float16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); @@ -74156,6 +65726,7 @@ pub fn vuzp_f16(a: float16x4_t, b: float16x4_t) -> float16x4x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vuzpq_f16(a: float16x8_t, b: float16x8_t) -> float16x8x2_t { unsafe { let a0: float16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); @@ -74724,6 +66295,7 @@ pub fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { )] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vzip_f16(a: float16x4_t, b: float16x4_t) -> float16x4x2_t { unsafe { let a0: float16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); @@ -74746,6 +66318,7 @@ pub fn vzip_f16(a: float16x4_t, b: float16x4_t) -> float16x4x2_t { )] #[target_feature(enable = "neon,fp16")] 
#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] pub fn vzipq_f16(a: float16x8_t, b: float16x8_t) -> float16x8x2_t { unsafe { let a0: float16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs index 60c9daef68c42..fbd1967c544ad 100644 --- a/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs +++ b/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs @@ -5503,8 +5503,12 @@ mod tests { test_vcombine!(test_vcombine_s16 => vcombine_s16([3_i16, -4, 5, -6], [13_i16, -14, 15, -16])); test_vcombine!(test_vcombine_u16 => vcombine_u16([3_u16, 4, 5, 6], [13_u16, 14, 15, 16])); test_vcombine!(test_vcombine_p16 => vcombine_p16([3_u16, 4, 5, 6], [13_u16, 14, 15, 16])); - test_vcombine!(test_vcombine_f16 => vcombine_f16([3_f16, 4., 5., 6.], - [13_f16, 14., 15., 16.])); + #[cfg(not(target_arch = "arm64ec"))] + mod fp16 { + use super::*; + test_vcombine!(test_vcombine_f16 => vcombine_f16([3_f16, 4., 5., 6.], + [13_f16, 14., 15., 16.])); + } test_vcombine!(test_vcombine_s32 => vcombine_s32([3_i32, -4], [13_i32, -14])); test_vcombine!(test_vcombine_u32 => vcombine_u32([3_u32, 4], [13_u32, 14])); diff --git a/library/stdarch/crates/core_arch/src/lib.rs b/library/stdarch/crates/core_arch/src/lib.rs index 7d3cbd55ad85c..26a9cb5899183 100644 --- a/library/stdarch/crates/core_arch/src/lib.rs +++ b/library/stdarch/crates/core_arch/src/lib.rs @@ -33,7 +33,8 @@ x86_amx_intrinsics, f16, aarch64_unstable_target_feature, - bigint_helper_methods + bigint_helper_methods, + funnel_shifts )] #![cfg_attr(test, feature(test, abi_vectorcall, stdarch_internal))] #![deny(clippy::missing_inline_in_public_items)] diff --git a/library/stdarch/crates/core_arch/src/loongarch32/mod.rs b/library/stdarch/crates/core_arch/src/loongarch32/mod.rs index fb05450373c28..4e3f3d27182e4 100644 --- a/library/stdarch/crates/core_arch/src/loongarch32/mod.rs +++ b/library/stdarch/crates/core_arch/src/loongarch32/mod.rs @@ -17,9 +17,10 @@ unsafe extern "unadjusted" { /// Generates the cache operation instruction #[inline] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn cacop(a: i32, b: i32) { - static_assert_simm_bits!(IMM12, 12); - __cacop(a, b, IMM12); +pub unsafe fn cacop(b: i32) { + static_assert_uimm_bits!(IMM5, 5); + static_assert_simm_bits!(IMM_S12, 12); + __cacop(IMM5, b, IMM_S12); } /// Reads the CSR diff --git a/library/stdarch/crates/core_arch/src/loongarch64/mod.rs b/library/stdarch/crates/core_arch/src/loongarch64/mod.rs index e8249805eae26..ab968aff20bbe 100644 --- a/library/stdarch/crates/core_arch/src/loongarch64/mod.rs +++ b/library/stdarch/crates/core_arch/src/loongarch64/mod.rs @@ -64,9 +64,10 @@ pub fn crcc_w_d_w(a: i64, b: i32) -> i32 { /// Generates the cache operation instruction #[inline] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn cacop(a: i64, b: i64) { - static_assert_simm_bits!(IMM12, 12); - __cacop(a, b, IMM12); +pub unsafe fn cacop(b: i64) { + static_assert_uimm_bits!(IMM5, 5); + static_assert_simm_bits!(IMM_S12, 12); + __cacop(IMM5, b, IMM_S12); } /// Reads the CSR @@ -125,14 +126,16 @@ pub unsafe fn asrtgt(a: i64, b: i64) { #[inline] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn lddir(a: i64) -> i64 { - __lddir(a, B) +pub unsafe fn lddir(a: i64) -> i64 { + static_assert_uimm_bits!(IMM8, 8); + 
__lddir(a, IMM8) } /// Loads the page table entry #[inline] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn ldpte(a: i64) { - __ldpte(a, B) +pub unsafe fn ldpte(a: i64) { + static_assert_uimm_bits!(IMM8, 8); + __ldpte(a, IMM8) } diff --git a/library/stdarch/crates/core_arch/src/loongarch_shared/mod.rs b/library/stdarch/crates/core_arch/src/loongarch_shared/mod.rs index 710b926f8df4f..8991fe857682b 100644 --- a/library/stdarch/crates/core_arch/src/loongarch_shared/mod.rs +++ b/library/stdarch/crates/core_arch/src/loongarch_shared/mod.rs @@ -131,17 +131,17 @@ pub fn ibar() { /// Moves data from a GPR to the FCSR #[inline] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub unsafe fn movgr2fcsr(a: i32) { - static_assert_uimm_bits!(IMM5, 5); - __movgr2fcsr(IMM5, a); +pub unsafe fn movgr2fcsr(a: i32) { + static_assert_uimm_bits!(IMM2, 2); + __movgr2fcsr(IMM2, a); } /// Moves data from a FCSR to the GPR #[inline] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn movfcsr2gr() -> i32 { - static_assert_uimm_bits!(IMM5, 5); - unsafe { __movfcsr2gr(IMM5) } +pub fn movfcsr2gr() -> i32 { + static_assert_uimm_bits!(IMM2, 2); + unsafe { __movfcsr2gr(IMM2) } } /// Reads the 8-bit IO-CSR diff --git a/library/stdarch/crates/core_arch/src/riscv64/mod.rs b/library/stdarch/crates/core_arch/src/riscv64/mod.rs index 0e860f6f2ad2f..a7efc0c7f58a1 100644 --- a/library/stdarch/crates/core_arch/src/riscv64/mod.rs +++ b/library/stdarch/crates/core_arch/src/riscv64/mod.rs @@ -20,7 +20,12 @@ pub use zk::*; #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hlv_wu(src: *const u32) -> u32 { let value: u32; - asm!(".insn i 0x73, 0x4, {}, {}, 0x681", out(reg) value, in(reg) src, options(readonly, nostack)); + asm!( + ".insn i 0x73, 0x4, {}, {}, 0x681", + lateout(reg) value, + in(reg) src, + options(readonly, nostack, preserves_flags) + ); value } @@ -38,7 +43,12 @@ pub unsafe fn hlv_wu(src: *const u32) -> u32 { #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hlv_d(src: *const i64) -> i64 { let value: i64; - asm!(".insn i 0x73, 0x4, {}, {}, 0x6C0", out(reg) value, in(reg) src, options(readonly, nostack)); + asm!( + ".insn i 0x73, 0x4, {}, {}, 0x6C0", + lateout(reg) value, + in(reg) src, + options(readonly, nostack, preserves_flags) + ); value } @@ -53,5 +63,10 @@ pub unsafe fn hlv_d(src: *const i64) -> i64 { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hsv_d(dst: *mut i64, src: i64) { - asm!(".insn r 0x73, 0x4, 0x37, x0, {}, {}", in(reg) dst, in(reg) src, options(nostack)); + asm!( + ".insn r 0x73, 0x4, 0x37, x0, {}, {}", + in(reg) dst, + in(reg) src, + options(nostack, preserves_flags) + ); } diff --git a/library/stdarch/crates/core_arch/src/riscv64/zk.rs b/library/stdarch/crates/core_arch/src/riscv64/zk.rs index c6af750bbc570..a30653cbe0885 100644 --- a/library/stdarch/crates/core_arch/src/riscv64/zk.rs +++ b/library/stdarch/crates/core_arch/src/riscv64/zk.rs @@ -176,7 +176,7 @@ pub fn aes64ks2(rs1: u64, rs2: u64) -> u64 { /// Version: v1.0.1 /// /// Section: 3.9 -#[target_feature(enable = "zkne", enable = "zknd")] +#[target_feature(enable = "zknd")] #[cfg_attr(test, assert_instr(aes64im))] #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] diff --git a/library/stdarch/crates/core_arch/src/riscv_shared/mod.rs b/library/stdarch/crates/core_arch/src/riscv_shared/mod.rs index 3ce24324de2e7..1bd147a64808c 100644 
--- a/library/stdarch/crates/core_arch/src/riscv_shared/mod.rs +++ b/library/stdarch/crates/core_arch/src/riscv_shared/mod.rs @@ -44,7 +44,12 @@ use crate::arch::asm; #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn pause() { - unsafe { asm!(".insn i 0x0F, 0, x0, x0, 0x010", options(nomem, nostack)) } + unsafe { + asm!( + ".insn i 0x0F, 0, x0, x0, 0x010", + options(nomem, nostack, preserves_flags) + ); + } } /// Generates the `NOP` instruction @@ -54,7 +59,9 @@ pub fn pause() { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn nop() { - unsafe { asm!("nop", options(nomem, nostack)) } + unsafe { + asm!("nop", options(nomem, nostack, preserves_flags)); + } } /// Generates the `WFI` instruction @@ -65,7 +72,7 @@ pub fn nop() { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn wfi() { - asm!("wfi", options(nomem, nostack)) + asm!("wfi", options(nomem, nostack, preserves_flags)); } /// Generates the `FENCE.I` instruction @@ -78,7 +85,7 @@ pub unsafe fn wfi() { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn fence_i() { - asm!("fence.i", options(nostack)) + asm!("fence.i", options(nostack, preserves_flags)); } /// Supervisor memory management fence for given virtual address and address space @@ -92,7 +99,7 @@ pub unsafe fn fence_i() { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn sfence_vma(vaddr: usize, asid: usize) { - asm!("sfence.vma {}, {}", in(reg) vaddr, in(reg) asid, options(nostack)) + asm!("sfence.vma {}, {}", in(reg) vaddr, in(reg) asid, options(nostack, preserves_flags)); } /// Supervisor memory management fence for given virtual address @@ -104,7 +111,7 @@ pub unsafe fn sfence_vma(vaddr: usize, asid: usize) { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn sfence_vma_vaddr(vaddr: usize) { - asm!("sfence.vma {}, x0", in(reg) vaddr, options(nostack)) + asm!("sfence.vma {}, x0", in(reg) vaddr, options(nostack, preserves_flags)); } /// Supervisor memory management fence for given address space @@ -118,7 +125,7 @@ pub unsafe fn sfence_vma_vaddr(vaddr: usize) { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn sfence_vma_asid(asid: usize) { - asm!("sfence.vma x0, {}", in(reg) asid, options(nostack)) + asm!("sfence.vma x0, {}", in(reg) asid, options(nostack, preserves_flags)); } /// Supervisor memory management fence for all address spaces and virtual addresses @@ -129,7 +136,7 @@ pub unsafe fn sfence_vma_asid(asid: usize) { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn sfence_vma_all() { - asm!("sfence.vma", options(nostack)) + asm!("sfence.vma", options(nostack, preserves_flags)); } /// Invalidate supervisor translation cache for given virtual address and address space @@ -139,8 +146,13 @@ pub unsafe fn sfence_vma_all() { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn sinval_vma(vaddr: usize, asid: usize) { - // asm!("sinval.vma {}, {}", in(reg) vaddr, in(reg) asid, options(nostack)) - asm!(".insn r 0x73, 0, 0x0B, x0, {}, {}", in(reg) vaddr, in(reg) asid, options(nostack)) + // asm!("sinval.vma {}, {}", in(reg) vaddr, in(reg) asid, options(nostack, preserves_flags)); + asm!( + ".insn r 0x73, 0, 0x0B, x0, {}, {}", + in(reg) vaddr, + in(reg) asid, + options(nostack, preserves_flags) + ); } /// Invalidate supervisor translation cache for given virtual 
address @@ -150,7 +162,11 @@ pub unsafe fn sinval_vma(vaddr: usize, asid: usize) { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn sinval_vma_vaddr(vaddr: usize) { - asm!(".insn r 0x73, 0, 0x0B, x0, {}, x0", in(reg) vaddr, options(nostack)) + asm!( + ".insn r 0x73, 0, 0x0B, x0, {}, x0", + in(reg) vaddr, + options(nostack, preserves_flags) + ); } /// Invalidate supervisor translation cache for given address space @@ -160,7 +176,11 @@ pub unsafe fn sinval_vma_vaddr(vaddr: usize) { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn sinval_vma_asid(asid: usize) { - asm!(".insn r 0x73, 0, 0x0B, x0, x0, {}", in(reg) asid, options(nostack)) + asm!( + ".insn r 0x73, 0, 0x0B, x0, x0, {}", + in(reg) asid, + options(nostack, preserves_flags) + ); } /// Invalidate supervisor translation cache for all address spaces and virtual addresses @@ -170,7 +190,10 @@ pub unsafe fn sinval_vma_asid(asid: usize) { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn sinval_vma_all() { - asm!(".insn r 0x73, 0, 0x0B, x0, x0, x0", options(nostack)) + asm!( + ".insn r 0x73, 0, 0x0B, x0, x0, x0", + options(nostack, preserves_flags) + ); } /// Generates the `SFENCE.W.INVAL` instruction @@ -180,8 +203,11 @@ pub unsafe fn sinval_vma_all() { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn sfence_w_inval() { - // asm!("sfence.w.inval", options(nostack)) - asm!(".insn i 0x73, 0, x0, x0, 0x180", options(nostack)) + // asm!("sfence.w.inval", options(nostack, preserves_flags)); + asm!( + ".insn i 0x73, 0, x0, x0, 0x180", + options(nostack, preserves_flags) + ); } /// Generates the `SFENCE.INVAL.IR` instruction @@ -191,8 +217,11 @@ pub unsafe fn sfence_w_inval() { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn sfence_inval_ir() { - // asm!("sfence.inval.ir", options(nostack)) - asm!(".insn i 0x73, 0, x0, x0, 0x181", options(nostack)) + // asm!("sfence.inval.ir", options(nostack, preserves_flags)); + asm!( + ".insn i 0x73, 0, x0, x0, 0x181", + options(nostack, preserves_flags) + ); } /// Loads virtual machine memory by signed byte integer @@ -207,7 +236,12 @@ pub unsafe fn sfence_inval_ir() { #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hlv_b(src: *const i8) -> i8 { let value: i8; - asm!(".insn i 0x73, 0x4, {}, {}, 0x600", out(reg) value, in(reg) src, options(readonly, nostack)); + asm!( + ".insn i 0x73, 0x4, {}, {}, 0x600", + lateout(reg) value, + in(reg) src, + options(readonly, nostack, preserves_flags) + ); value } @@ -223,7 +257,12 @@ pub unsafe fn hlv_b(src: *const i8) -> i8 { #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hlv_bu(src: *const u8) -> u8 { let value: u8; - asm!(".insn i 0x73, 0x4, {}, {}, 0x601", out(reg) value, in(reg) src, options(readonly, nostack)); + asm!( + ".insn i 0x73, 0x4, {}, {}, 0x601", + lateout(reg) value, + in(reg) src, + options(readonly, nostack, preserves_flags) + ); value } @@ -239,7 +278,12 @@ pub unsafe fn hlv_bu(src: *const u8) -> u8 { #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hlv_h(src: *const i16) -> i16 { let value: i16; - asm!(".insn i 0x73, 0x4, {}, {}, 0x640", out(reg) value, in(reg) src, options(readonly, nostack)); + asm!( + ".insn i 0x73, 0x4, {}, {}, 0x640", + lateout(reg) value, + in(reg) src, + options(readonly, nostack, preserves_flags) + ); value } @@ -255,7 +299,12 @@ pub unsafe fn 
hlv_h(src: *const i16) -> i16 { #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hlv_hu(src: *const u16) -> u16 { let value: u16; - asm!(".insn i 0x73, 0x4, {}, {}, 0x641", out(reg) value, in(reg) src, options(readonly, nostack)); + asm!( + ".insn i 0x73, 0x4, {}, {}, 0x641", + lateout(reg) value, + in(reg) src, + options(readonly, nostack, preserves_flags) + ); value } @@ -271,7 +320,12 @@ pub unsafe fn hlv_hu(src: *const u16) -> u16 { #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hlvx_hu(src: *const u16) -> u16 { let insn: u16; - asm!(".insn i 0x73, 0x4, {}, {}, 0x643", out(reg) insn, in(reg) src, options(readonly, nostack)); + asm!( + ".insn i 0x73, 0x4, {}, {}, 0x643", + lateout(reg) insn, + in(reg) src, + options(readonly, nostack, preserves_flags) + ); insn } @@ -287,7 +341,12 @@ pub unsafe fn hlvx_hu(src: *const u16) -> u16 { #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hlv_w(src: *const i32) -> i32 { let value: i32; - asm!(".insn i 0x73, 0x4, {}, {}, 0x680", out(reg) value, in(reg) src, options(readonly, nostack)); + asm!( + ".insn i 0x73, 0x4, {}, {}, 0x680", + lateout(reg) value, + in(reg) src, + options(readonly, nostack, preserves_flags) + ); value } @@ -303,7 +362,12 @@ pub unsafe fn hlv_w(src: *const i32) -> i32 { #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hlvx_wu(src: *const u32) -> u32 { let insn: u32; - asm!(".insn i 0x73, 0x4, {}, {}, 0x683", out(reg) insn, in(reg) src, options(readonly, nostack)); + asm!( + ".insn i 0x73, 0x4, {}, {}, 0x683", + lateout(reg) insn, + in(reg) src, + options(readonly, nostack, preserves_flags) + ); insn } @@ -318,7 +382,12 @@ pub unsafe fn hlvx_wu(src: *const u32) -> u32 { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hsv_b(dst: *mut i8, src: i8) { - asm!(".insn r 0x73, 0x4, 0x31, x0, {}, {}", in(reg) dst, in(reg) src, options(nostack)); + asm!( + ".insn r 0x73, 0x4, 0x31, x0, {}, {}", + in(reg) dst, + in(reg) src, + options(nostack, preserves_flags) + ); } /// Stores virtual machine memory by half integer @@ -332,7 +401,12 @@ pub unsafe fn hsv_b(dst: *mut i8, src: i8) { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hsv_h(dst: *mut i16, src: i16) { - asm!(".insn r 0x73, 0x4, 0x33, x0, {}, {}", in(reg) dst, in(reg) src, options(nostack)); + asm!( + ".insn r 0x73, 0x4, 0x33, x0, {}, {}", + in(reg) dst, + in(reg) src, + options(nostack, preserves_flags) + ); } /// Stores virtual machine memory by word integer @@ -346,7 +420,12 @@ pub unsafe fn hsv_h(dst: *mut i16, src: i16) { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hsv_w(dst: *mut i32, src: i32) { - asm!(".insn r 0x73, 0x4, 0x35, x0, {}, {}", in(reg) dst, in(reg) src, options(nostack)); + asm!( + ".insn r 0x73, 0x4, 0x35, x0, {}, {}", + in(reg) dst, + in(reg) src, + options(nostack, preserves_flags) + ); } /// Hypervisor memory management fence for given guest virtual address and guest address space @@ -360,8 +439,13 @@ pub unsafe fn hsv_w(dst: *mut i32, src: i32) { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hfence_vvma(vaddr: usize, asid: usize) { - // asm!("hfence.vvma {}, {}", in(reg) vaddr, in(reg) asid) - asm!(".insn r 0x73, 0, 0x11, x0, {}, {}", in(reg) vaddr, in(reg) asid, options(nostack)) + // asm!("hfence.vvma {}, {}", in(reg) vaddr, in(reg) asid, options(nostack, preserves_flags)); + 
asm!( + ".insn r 0x73, 0, 0x11, x0, {}, {}", + in(reg) vaddr, + in(reg) asid, + options(nostack, preserves_flags) + ); } /// Hypervisor memory management fence for given guest virtual address @@ -375,7 +459,11 @@ pub unsafe fn hfence_vvma(vaddr: usize, asid: usize) { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hfence_vvma_vaddr(vaddr: usize) { - asm!(".insn r 0x73, 0, 0x11, x0, {}, x0", in(reg) vaddr, options(nostack)) + asm!( + ".insn r 0x73, 0, 0x11, x0, {}, x0", + in(reg) vaddr, + options(nostack, preserves_flags) + ); } /// Hypervisor memory management fence for given guest address space @@ -389,7 +477,11 @@ pub unsafe fn hfence_vvma_vaddr(vaddr: usize) { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hfence_vvma_asid(asid: usize) { - asm!(".insn r 0x73, 0, 0x11, x0, x0, {}", in(reg) asid, options(nostack)) + asm!( + ".insn r 0x73, 0, 0x11, x0, x0, {}", + in(reg) asid, + options(nostack, preserves_flags) + ); } /// Hypervisor memory management fence for all guest address spaces and guest virtual addresses @@ -403,7 +495,10 @@ pub unsafe fn hfence_vvma_asid(asid: usize) { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hfence_vvma_all() { - asm!(".insn r 0x73, 0, 0x11, x0, x0, x0", options(nostack)) + asm!( + ".insn r 0x73, 0, 0x11, x0, x0, x0", + options(nostack, preserves_flags) + ); } /// Hypervisor memory management fence for guest physical address and virtual machine @@ -416,8 +511,13 @@ pub unsafe fn hfence_vvma_all() { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hfence_gvma(gaddr: usize, vmid: usize) { - // asm!("hfence.gvma {}, {}", in(reg) gaddr, in(reg) vmid, options(nostack)) - asm!(".insn r 0x73, 0, 0x31, x0, {}, {}", in(reg) gaddr, in(reg) vmid, options(nostack)) + // asm!("hfence.gvma {}, {}", in(reg) gaddr, in(reg) vmid, options(nostack, preserves_flags)); + asm!( + ".insn r 0x73, 0, 0x31, x0, {}, {}", + in(reg) gaddr, + in(reg) vmid, + options(nostack, preserves_flags) + ); } /// Hypervisor memory management fence for guest physical address @@ -429,7 +529,11 @@ pub unsafe fn hfence_gvma(gaddr: usize, vmid: usize) { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hfence_gvma_gaddr(gaddr: usize) { - asm!(".insn r 0x73, 0, 0x31, x0, {}, x0", in(reg) gaddr, options(nostack)) + asm!( + ".insn r 0x73, 0, 0x31, x0, {}, x0", + in(reg) gaddr, + options(nostack, preserves_flags) + ); } /// Hypervisor memory management fence for given virtual machine @@ -441,7 +545,11 @@ pub unsafe fn hfence_gvma_gaddr(gaddr: usize) { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hfence_gvma_vmid(vmid: usize) { - asm!(".insn r 0x73, 0, 0x31, x0, x0, {}", in(reg) vmid, options(nostack)) + asm!( + ".insn r 0x73, 0, 0x31, x0, x0, {}", + in(reg) vmid, + options(nostack, preserves_flags) + ); } /// Hypervisor memory management fence for all virtual machines and guest physical addresses @@ -453,7 +561,10 @@ pub unsafe fn hfence_gvma_vmid(vmid: usize) { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hfence_gvma_all() { - asm!(".insn r 0x73, 0, 0x31, x0, x0, x0", options(nostack)) + asm!( + ".insn r 0x73, 0, 0x31, x0, x0, x0", + options(nostack, preserves_flags) + ); } /// Invalidate hypervisor translation cache for given guest virtual address and guest address space @@ -465,8 +576,13 @@ pub unsafe fn hfence_gvma_all() 
{ #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hinval_vvma(vaddr: usize, asid: usize) { - // asm!("hinval.vvma {}, {}", in(reg) vaddr, in(reg) asid, options(nostack)) - asm!(".insn r 0x73, 0, 0x13, x0, {}, {}", in(reg) vaddr, in(reg) asid, options(nostack)) + // asm!("hinval.vvma {}, {}", in(reg) vaddr, in(reg) asid, options(nostack, preserves_flags)); + asm!( + ".insn r 0x73, 0, 0x13, x0, {}, {}", + in(reg) vaddr, + in(reg) asid, + options(nostack, preserves_flags) + ); } /// Invalidate hypervisor translation cache for given guest virtual address @@ -478,7 +594,11 @@ pub unsafe fn hinval_vvma(vaddr: usize, asid: usize) { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hinval_vvma_vaddr(vaddr: usize) { - asm!(".insn r 0x73, 0, 0x13, x0, {}, x0", in(reg) vaddr, options(nostack)) + asm!( + ".insn r 0x73, 0, 0x13, x0, {}, x0", + in(reg) vaddr, + options(nostack, preserves_flags) + ); } /// Invalidate hypervisor translation cache for given guest address space @@ -490,7 +610,11 @@ pub unsafe fn hinval_vvma_vaddr(vaddr: usize) { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hinval_vvma_asid(asid: usize) { - asm!(".insn r 0x73, 0, 0x13, x0, x0, {}", in(reg) asid, options(nostack)) + asm!( + ".insn r 0x73, 0, 0x13, x0, x0, {}", + in(reg) asid, + options(nostack, preserves_flags) + ); } /// Invalidate hypervisor translation cache for all guest address spaces and guest virtual addresses @@ -502,7 +626,10 @@ pub unsafe fn hinval_vvma_asid(asid: usize) { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hinval_vvma_all() { - asm!(".insn r 0x73, 0, 0x13, x0, x0, x0", options(nostack)) + asm!( + ".insn r 0x73, 0, 0x13, x0, x0, x0", + options(nostack, preserves_flags) + ); } /// Invalidate hypervisor translation cache for guest physical address and virtual machine @@ -515,8 +642,13 @@ pub unsafe fn hinval_vvma_all() { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hinval_gvma(gaddr: usize, vmid: usize) { - // asm!("hinval.gvma {}, {}", in(reg) gaddr, in(reg) vmid, options(nostack)) - asm!(".insn r 0x73, 0, 0x33, x0, {}, {}", in(reg) gaddr, in(reg) vmid, options(nostack)) + // asm!("hinval.gvma {}, {}", in(reg) gaddr, in(reg) vmid, options(nostack, preserves_flags)); + asm!( + ".insn r 0x73, 0, 0x33, x0, {}, {}", + in(reg) gaddr, + in(reg) vmid, + options(nostack, preserves_flags) + ); } /// Invalidate hypervisor translation cache for guest physical address @@ -528,7 +660,11 @@ pub unsafe fn hinval_gvma(gaddr: usize, vmid: usize) { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hinval_gvma_gaddr(gaddr: usize) { - asm!(".insn r 0x73, 0, 0x33, x0, {}, x0", in(reg) gaddr, options(nostack)) + asm!( + ".insn r 0x73, 0, 0x33, x0, {}, x0", + in(reg) gaddr, + options(nostack, preserves_flags) + ); } /// Invalidate hypervisor translation cache for given virtual machine @@ -540,7 +676,11 @@ pub unsafe fn hinval_gvma_gaddr(gaddr: usize) { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hinval_gvma_vmid(vmid: usize) { - asm!(".insn r 0x73, 0, 0x33, x0, x0, {}", in(reg) vmid, options(nostack)) + asm!( + ".insn r 0x73, 0, 0x33, x0, x0, {}", + in(reg) vmid, + options(nostack, preserves_flags) + ); } /// Invalidate hypervisor translation cache for all virtual machines and guest physical addresses @@ -552,7 +692,10 @@ pub unsafe fn 
hinval_gvma_vmid(vmid: usize) { #[inline] #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hinval_gvma_all() { - asm!(".insn r 0x73, 0, 0x33, x0, x0, x0", options(nostack)) + asm!( + ".insn r 0x73, 0, 0x33, x0, x0, x0", + options(nostack, preserves_flags) + ); } /// Reads the floating-point rounding mode register `frm` @@ -574,6 +717,12 @@ pub unsafe fn hinval_gvma_all() { #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn frrm() -> u32 { let value: u32; - unsafe { asm!("frrm {}", out(reg) value, options(nomem, nostack)) }; + unsafe { + asm!( + "frrm {}", + out(reg) value, + options(nomem, nostack, preserves_flags) + ); + } value } diff --git a/library/stdarch/crates/core_arch/src/riscv_shared/zb.rs b/library/stdarch/crates/core_arch/src/riscv_shared/zb.rs index 9472e3c8be9f6..514afd9080920 100644 --- a/library/stdarch/crates/core_arch/src/riscv_shared/zb.rs +++ b/library/stdarch/crates/core_arch/src/riscv_shared/zb.rs @@ -68,7 +68,7 @@ pub fn orc_b(rs: usize) -> usize { /// /// Section: 2.11 #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] -#[target_feature(enable = "zbc")] +#[target_feature(enable = "zbkc")] #[cfg_attr(test, assert_instr(clmul))] #[inline] pub fn clmul(rs1: usize, rs2: usize) -> usize { @@ -93,7 +93,7 @@ pub fn clmul(rs1: usize, rs2: usize) -> usize { /// /// Section: 2.12 #[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] -#[target_feature(enable = "zbc")] +#[target_feature(enable = "zbkc")] #[cfg_attr(test, assert_instr(clmulh))] #[inline] pub fn clmulh(rs1: usize, rs2: usize) -> usize { diff --git a/library/stdarch/crates/core_arch/src/s390x/vector.rs b/library/stdarch/crates/core_arch/src/s390x/vector.rs index 0ce720a9244cd..f018344ead12d 100644 --- a/library/stdarch/crates/core_arch/src/s390x/vector.rs +++ b/library/stdarch/crates/core_arch/src/s390x/vector.rs @@ -94,8 +94,6 @@ unsafe extern "unadjusted" { #[link_name = "llvm.s390.vsrlb"] fn vsrlb(a: vector_signed_char, b: vector_signed_char) -> vector_signed_char; #[link_name = "llvm.s390.vslb"] fn vslb(a: vector_signed_char, b: vector_signed_char) -> vector_signed_char; - #[link_name = "llvm.s390.vsldb"] fn vsldb(a: i8x16, b: i8x16, c: u32) -> i8x16; - #[link_name = "llvm.s390.vsld"] fn vsld(a: i8x16, b: i8x16, c: u32) -> i8x16; #[link_name = "llvm.s390.vsrd"] fn vsrd(a: i8x16, b: i8x16, c: u32) -> i8x16; #[link_name = "llvm.s390.verimb"] fn verimb(a: vector_signed_char, b: vector_signed_char, c: vector_signed_char, d: i32) -> vector_signed_char; @@ -114,6 +112,9 @@ unsafe extern "unadjusted" { #[link_name = "llvm.s390.vsumqf"] fn vsumqf(a: vector_unsigned_int, b: vector_unsigned_int) -> u128; #[link_name = "llvm.s390.vsumqg"] fn vsumqg(a: vector_unsigned_long_long, b: vector_unsigned_long_long) -> u128; + #[link_name = "llvm.s390.vaccq"] fn vaccq(a: u128, b: u128) -> u128; + #[link_name = "llvm.s390.vacccq"] fn vacccq(a: u128, b: u128, c: u128) -> u128; + #[link_name = "llvm.s390.vscbiq"] fn vscbiq(a: u128, b: u128) -> u128; #[link_name = "llvm.s390.vsbiq"] fn vsbiq(a: u128, b: u128, c: u128) -> u128; #[link_name = "llvm.s390.vsbcbiq"] fn vsbcbiq(a: u128, b: u128, c: u128) -> u128; @@ -166,13 +167,6 @@ unsafe extern "unadjusted" { #[link_name = "llvm.s390.vpklsfs"] fn vpklsfs(a: vector_unsigned_int, b: vector_unsigned_int) -> PackedTuple; #[link_name = "llvm.s390.vpklsgs"] fn vpklsgs(a: vector_unsigned_long_long, b: vector_unsigned_long_long) -> PackedTuple; - #[link_name = "llvm.s390.vuplb"] fn vuplb (a: vector_signed_char) -> 
vector_signed_short; - #[link_name = "llvm.s390.vuplhw"] fn vuplhw (a: vector_signed_short) -> vector_signed_int; - #[link_name = "llvm.s390.vuplf"] fn vuplf (a: vector_signed_int) -> vector_signed_long_long; - #[link_name = "llvm.s390.vupllb"] fn vupllb (a: vector_unsigned_char) -> vector_unsigned_short; - #[link_name = "llvm.s390.vupllh"] fn vupllh (a: vector_unsigned_short) -> vector_unsigned_int; - #[link_name = "llvm.s390.vupllf"] fn vupllf (a: vector_unsigned_int) -> vector_unsigned_long_long; - #[link_name = "llvm.s390.vavgb"] fn vavgb(a: vector_signed_char, b: vector_signed_char) -> vector_signed_char; #[link_name = "llvm.s390.vavgh"] fn vavgh(a: vector_signed_short, b: vector_signed_short) -> vector_signed_short; #[link_name = "llvm.s390.vavgf"] fn vavgf(a: vector_signed_int, b: vector_signed_int) -> vector_signed_int; @@ -185,22 +179,6 @@ unsafe extern "unadjusted" { #[link_name = "llvm.s390.vcksm"] fn vcksm(a: vector_unsigned_int, b: vector_unsigned_int) -> vector_unsigned_int; - #[link_name = "llvm.s390.vmeb"] fn vmeb(a: vector_signed_char, b: vector_signed_char) -> vector_signed_short; - #[link_name = "llvm.s390.vmeh"] fn vmeh(a: vector_signed_short, b: vector_signed_short) -> vector_signed_int; - #[link_name = "llvm.s390.vmef"] fn vmef(a: vector_signed_int, b: vector_signed_int) -> vector_signed_long_long; - - #[link_name = "llvm.s390.vmleb"] fn vmleb(a: vector_unsigned_char, b: vector_unsigned_char) -> vector_unsigned_short; - #[link_name = "llvm.s390.vmleh"] fn vmleh(a: vector_unsigned_short, b: vector_unsigned_short) -> vector_unsigned_int; - #[link_name = "llvm.s390.vmlef"] fn vmlef(a: vector_unsigned_int, b: vector_unsigned_int) -> vector_unsigned_long_long; - - #[link_name = "llvm.s390.vmob"] fn vmob(a: vector_signed_char, b: vector_signed_char) -> vector_signed_short; - #[link_name = "llvm.s390.vmoh"] fn vmoh(a: vector_signed_short, b: vector_signed_short) -> vector_signed_int; - #[link_name = "llvm.s390.vmof"] fn vmof(a: vector_signed_int, b: vector_signed_int) -> vector_signed_long_long; - - #[link_name = "llvm.s390.vmlob"] fn vmlob(a: vector_unsigned_char, b: vector_unsigned_char) -> vector_unsigned_short; - #[link_name = "llvm.s390.vmloh"] fn vmloh(a: vector_unsigned_short, b: vector_unsigned_short) -> vector_unsigned_int; - #[link_name = "llvm.s390.vmlof"] fn vmlof(a: vector_unsigned_int, b: vector_unsigned_int) -> vector_unsigned_long_long; - #[link_name = "llvm.s390.vmhb"] fn vmhb(a: vector_signed_char, b: vector_signed_char) -> vector_signed_char; #[link_name = "llvm.s390.vmhh"] fn vmhh(a: vector_signed_short, b: vector_signed_short) -> vector_signed_short; #[link_name = "llvm.s390.vmhf"] fn vmhf(a: vector_signed_int, b: vector_signed_int) -> vector_signed_int; @@ -376,7 +354,20 @@ impl ShuffleMask { ShuffleMask(mask) } - const fn pack() -> Self { + const fn even() -> Self { + let mut mask = [0; N]; + let mut i = 0; + let mut index = 0; + while index < N { + mask[index] = i as u32; + + i += 2; + index += 1; + } + ShuffleMask(mask) + } + + const fn odd() -> Self { let mut mask = [0; N]; let mut i = 1; let mut index = 0; @@ -389,6 +380,10 @@ impl ShuffleMask { ShuffleMask(mask) } + const fn pack() -> Self { + Self::odd() + } + const fn unpack_low() -> Self { let mut mask = [0; N]; let mut i = 0; @@ -1198,10 +1193,8 @@ mod sealed { test_impl! { vec_roundc_f32 (a: vector_float) -> vector_float [nearbyint_v4f32, "vector-enhancements-1" vfisb] } test_impl! 
{ vec_roundc_f64 (a: vector_double) -> vector_double [nearbyint_v2f64, vfidb] } - // FIXME(llvm) llvm trunk already lowers roundeven to vfidb, but rust does not use it yet - // use https://godbolt.org/z/cWq95fexe to check, and enable the instruction test when it works - test_impl! { vec_round_f32 (a: vector_float) -> vector_float [roundeven_v4f32, _] } - test_impl! { vec_round_f64 (a: vector_double) -> vector_double [roundeven_v2f64, _] } + test_impl! { vec_round_f32 (a: vector_float) -> vector_float [roundeven_v4f32, "vector-enhancements-1" vfisb] } + test_impl! { vec_round_f64 (a: vector_double) -> vector_double [roundeven_v2f64, vfidb] } #[unstable(feature = "stdarch_s390x", issue = "135681")] pub trait VectorRoundc { @@ -2359,6 +2352,9 @@ mod sealed { unsafe fn vec_packs(self, b: Other) -> Self::Result; } + // FIXME(llvm): https://github.com/llvm/llvm-project/issues/153655 + // Other targets can use a min/max for the saturation + a truncation. + impl_vec_trait! { [VectorPacks vec_packs] vpksh (vector_signed_short, vector_signed_short) -> vector_signed_char } impl_vec_trait! { [VectorPacks vec_packs] vpklsh (vector_unsigned_short, vector_unsigned_short) -> vector_unsigned_char } impl_vec_trait! { [VectorPacks vec_packs] vpksf (vector_signed_int, vector_signed_int) -> vector_signed_short } @@ -2580,8 +2576,14 @@ mod sealed { unsafe fn vec_unpackl(self) -> Self::Result; } - // FIXME(llvm): a shuffle + simd_as does not currently optimize into a single instruction like - // unpachk above. Tracked in https://github.com/llvm/llvm-project/issues/129576. + // NOTE: `vuplh` is used for "unpack logical high", hence `vuplhw`. + impl_vec_unpack!(unpack_low vuplb vector_signed_char i8x8 vector_signed_short 8); + impl_vec_unpack!(unpack_low vuplhw vector_signed_short i16x4 vector_signed_int 4); + impl_vec_unpack!(unpack_low vuplf vector_signed_int i32x2 vector_signed_long_long 2); + + impl_vec_unpack!(unpack_low vupllb vector_unsigned_char u8x8 vector_unsigned_short 8); + impl_vec_unpack!(unpack_low vupllh vector_unsigned_short u16x4 vector_unsigned_int 4); + impl_vec_unpack!(unpack_low vupllf vector_unsigned_int u32x2 vector_unsigned_long_long 2); impl_vec_trait! {[VectorUnpackl vec_unpackl] vuplb (vector_signed_char) -> vector_signed_short} impl_vec_trait! {[VectorUnpackl vec_unpackl] vuplhw (vector_signed_short) -> vector_signed_int} @@ -2642,61 +2644,65 @@ mod sealed { unsafe fn vec_mule(self, b: Self) -> Result; } - // FIXME(llvm) sadly this does not yet work https://github.com/llvm/llvm-project/issues/129705 - // #[target_feature(enable = "vector")] - // #[cfg_attr(test, assert_instr(vmleh))] - // unsafe fn vec_vmleh(a: vector_unsigned_short, b: vector_unsigned_short) -> vector_unsigned_int { - // let even_a: vector_unsigned_int = simd_as(simd_shuffle::<_, _, u16x4>( - // a, - // a, - // const { ShuffleMask([0, 2, 4, 6]) }, - // )); - // - // let even_b: vector_unsigned_int = simd_as(simd_shuffle::<_, _, u16x4>( - // b, - // b, - // const { ShuffleMask([0, 2, 4, 6]) }, - // )); - // - // simd_mul(even_a, even_b) - // } + macro_rules! impl_vec_mul_even_odd { + ($mask:ident $instr:ident $src:ident $shuffled:ident $dst:ident $width:literal) => { + #[inline] + #[target_feature(enable = "vector")] + #[cfg_attr(test, assert_instr($instr))] + unsafe fn $instr(a: $src, b: $src) -> $dst { + let elems_a: $dst = simd_as(simd_shuffle::<_, _, $shuffled>( + a, + a, // this argument is ignored entirely. + const { ShuffleMask::<$width>::$mask() }, + )); - test_impl! 
{ vec_vmeb(a: vector_signed_char, b: vector_signed_char) -> vector_signed_short [ vmeb, vmeb ] } - test_impl! { vec_vmeh(a: vector_signed_short, b: vector_signed_short) -> vector_signed_int[ vmeh, vmeh ] } - test_impl! { vec_vmef(a: vector_signed_int, b: vector_signed_int) -> vector_signed_long_long [ vmef, vmef ] } + let elems_b: $dst = simd_as(simd_shuffle::<_, _, $shuffled>( + b, + b, // this argument is ignored entirely. + const { ShuffleMask::<$width>::$mask() }, + )); - test_impl! { vec_vmleb(a: vector_unsigned_char, b: vector_unsigned_char) -> vector_unsigned_short [ vmleb, vmleb ] } - test_impl! { vec_vmleh(a: vector_unsigned_short, b: vector_unsigned_short) -> vector_unsigned_int[ vmleh, vmleh ] } - test_impl! { vec_vmlef(a: vector_unsigned_int, b: vector_unsigned_int) -> vector_unsigned_long_long [ vmlef, vmlef ] } + simd_mul(elems_a, elems_b) + } + }; + } + + impl_vec_mul_even_odd! { even vmeb vector_signed_char i8x8 vector_signed_short 8 } + impl_vec_mul_even_odd! { even vmeh vector_signed_short i16x4 vector_signed_int 4 } + impl_vec_mul_even_odd! { even vmef vector_signed_int i32x2 vector_signed_long_long 2 } - impl_mul!([VectorMule vec_mule] vec_vmeb (vector_signed_char, vector_signed_char) -> vector_signed_short ); - impl_mul!([VectorMule vec_mule] vec_vmeh (vector_signed_short, vector_signed_short) -> vector_signed_int); - impl_mul!([VectorMule vec_mule] vec_vmef (vector_signed_int, vector_signed_int) -> vector_signed_long_long ); + impl_vec_mul_even_odd! { even vmleb vector_unsigned_char u8x8 vector_unsigned_short 8 } + impl_vec_mul_even_odd! { even vmleh vector_unsigned_short u16x4 vector_unsigned_int 4 } + impl_vec_mul_even_odd! { even vmlef vector_unsigned_int u32x2 vector_unsigned_long_long 2 } - impl_mul!([VectorMule vec_mule] vec_vmleb (vector_unsigned_char, vector_unsigned_char) -> vector_unsigned_short ); - impl_mul!([VectorMule vec_mule] vec_vmleh (vector_unsigned_short, vector_unsigned_short) -> vector_unsigned_int); - impl_mul!([VectorMule vec_mule] vec_vmlef (vector_unsigned_int, vector_unsigned_int) -> vector_unsigned_long_long ); + impl_mul!([VectorMule vec_mule] vmeb (vector_signed_char, vector_signed_char) -> vector_signed_short ); + impl_mul!([VectorMule vec_mule] vmeh (vector_signed_short, vector_signed_short) -> vector_signed_int); + impl_mul!([VectorMule vec_mule] vmef (vector_signed_int, vector_signed_int) -> vector_signed_long_long ); + + impl_mul!([VectorMule vec_mule] vmleb (vector_unsigned_char, vector_unsigned_char) -> vector_unsigned_short ); + impl_mul!([VectorMule vec_mule] vmleh (vector_unsigned_short, vector_unsigned_short) -> vector_unsigned_int); + impl_mul!([VectorMule vec_mule] vmlef (vector_unsigned_int, vector_unsigned_int) -> vector_unsigned_long_long ); #[unstable(feature = "stdarch_s390x", issue = "135681")] pub trait VectorMulo { unsafe fn vec_mulo(self, b: Self) -> Result; } - test_impl! { vec_vmob(a: vector_signed_char, b: vector_signed_char) -> vector_signed_short [ vmob, vmob ] } - test_impl! { vec_vmoh(a: vector_signed_short, b: vector_signed_short) -> vector_signed_int[ vmoh, vmoh ] } - test_impl! { vec_vmof(a: vector_signed_int, b: vector_signed_int) -> vector_signed_long_long [ vmof, vmof ] } + impl_vec_mul_even_odd! { odd vmob vector_signed_char i8x8 vector_signed_short 8 } + impl_vec_mul_even_odd! { odd vmoh vector_signed_short i16x4 vector_signed_int 4 } + impl_vec_mul_even_odd! { odd vmof vector_signed_int i32x2 vector_signed_long_long 2 } - test_impl! 
{ vec_vmlob(a: vector_unsigned_char, b: vector_unsigned_char) -> vector_unsigned_short [ vmlob, vmlob ] } - test_impl! { vec_vmloh(a: vector_unsigned_short, b: vector_unsigned_short) -> vector_unsigned_int[ vmloh, vmloh ] } - test_impl! { vec_vmlof(a: vector_unsigned_int, b: vector_unsigned_int) -> vector_unsigned_long_long [ vmlof, vmlof ] } + impl_vec_mul_even_odd! { odd vmlob vector_unsigned_char u8x8 vector_unsigned_short 8 } + impl_vec_mul_even_odd! { odd vmloh vector_unsigned_short u16x4 vector_unsigned_int 4 } + impl_vec_mul_even_odd! { odd vmlof vector_unsigned_int u32x2 vector_unsigned_long_long 2 } - impl_mul!([VectorMulo vec_mulo] vec_vmob (vector_signed_char, vector_signed_char) -> vector_signed_short ); - impl_mul!([VectorMulo vec_mulo] vec_vmoh (vector_signed_short, vector_signed_short) -> vector_signed_int); - impl_mul!([VectorMulo vec_mulo] vec_vmof (vector_signed_int, vector_signed_int) -> vector_signed_long_long ); + impl_mul!([VectorMulo vec_mulo] vmob (vector_signed_char, vector_signed_char) -> vector_signed_short ); + impl_mul!([VectorMulo vec_mulo] vmoh (vector_signed_short, vector_signed_short) -> vector_signed_int); + impl_mul!([VectorMulo vec_mulo] vmof (vector_signed_int, vector_signed_int) -> vector_signed_long_long ); - impl_mul!([VectorMulo vec_mulo] vec_vmlob (vector_unsigned_char, vector_unsigned_char) -> vector_unsigned_short ); - impl_mul!([VectorMulo vec_mulo] vec_vmloh (vector_unsigned_short, vector_unsigned_short) -> vector_unsigned_int); - impl_mul!([VectorMulo vec_mulo] vec_vmlof (vector_unsigned_int, vector_unsigned_int) -> vector_unsigned_long_long ); + impl_mul!([VectorMulo vec_mulo] vmlob (vector_unsigned_char, vector_unsigned_char) -> vector_unsigned_short ); + impl_mul!([VectorMulo vec_mulo] vmloh (vector_unsigned_short, vector_unsigned_short) -> vector_unsigned_int); + impl_mul!([VectorMulo vec_mulo] vmlof (vector_unsigned_int, vector_unsigned_int) -> vector_unsigned_long_long ); #[unstable(feature = "stdarch_s390x", issue = "135681")] pub trait VectorMulh { @@ -3319,8 +3325,7 @@ mod sealed { #[inline] #[target_feature(enable = "vector")] - // FIXME(llvm): https://github.com/llvm/llvm-project/issues/129899 - // #[cfg_attr(test, assert_instr(vsegb))] + #[cfg_attr(test, assert_instr(vsegb))] pub unsafe fn vsegb(a: vector_signed_char) -> vector_signed_long_long { simd_as(simd_shuffle::<_, _, i8x2>( a, @@ -3331,8 +3336,7 @@ mod sealed { #[inline] #[target_feature(enable = "vector")] - // FIXME(llvm): https://github.com/llvm/llvm-project/issues/129899 - // #[cfg_attr(test, assert_instr(vsegh))] + #[cfg_attr(test, assert_instr(vsegh))] pub unsafe fn vsegh(a: vector_signed_short) -> vector_signed_long_long { simd_as(simd_shuffle::<_, _, i16x2>( a, @@ -3343,8 +3347,7 @@ mod sealed { #[inline] #[target_feature(enable = "vector")] - // FIXME(llvm): https://github.com/llvm/llvm-project/issues/129899 - // #[cfg_attr(test, assert_instr(vsegf))] + #[cfg_attr(test, assert_instr(vsegf))] pub unsafe fn vsegf(a: vector_signed_int) -> vector_signed_long_long { simd_as(simd_shuffle::<_, _, i32x2>( a, @@ -3482,10 +3485,33 @@ mod sealed { unsafe fn vec_sldb(self, b: Self) -> Self; } - // FIXME(llvm) https://github.com/llvm/llvm-project/issues/129955 - // ideally we could implement this in terms of llvm.fshl.i128 - // #[link_name = "llvm.fshl.i128"] fn fshl_i128(a: u128, b: u128, c: u128) -> u128; - // transmute(fshl_i128(transmute(a), transmute(b), const { C * 8 } )) + #[inline] + #[target_feature(enable = "vector")] + #[cfg_attr(test, assert_instr(vsldb))] + unsafe 
fn test_vec_sld(a: vector_signed_int, b: vector_signed_int) -> vector_signed_int { + a.vec_sld::<13>(b) + } + + #[inline] + #[target_feature(enable = "vector")] + #[cfg_attr(test, assert_instr(vsldb))] + unsafe fn test_vec_sldw(a: vector_signed_int, b: vector_signed_int) -> vector_signed_int { + a.vec_sldw::<3>(b) + } + + #[inline] + #[target_feature(enable = "vector-enhancements-2")] + #[cfg_attr(test, assert_instr(vsld))] + unsafe fn test_vec_sldb(a: vector_signed_int, b: vector_signed_int) -> vector_signed_int { + a.vec_sldb::<7>(b) + } + + #[inline] + #[target_feature(enable = "vector-enhancements-2")] + #[cfg_attr(test, assert_instr(vsrd))] + unsafe fn test_vec_srdb(a: vector_signed_int, b: vector_signed_int) -> vector_signed_int { + a.vec_srdb::<7>(b) + } macro_rules! impl_vec_sld { ($($ty:ident)*) => { @@ -3496,21 +3522,21 @@ mod sealed { #[target_feature(enable = "vector")] unsafe fn vec_sld<const C: u32>(self, b: Self) -> Self { static_assert_uimm_bits!(C, 4); - transmute(vsldb(transmute(self), transmute(b), C)) + transmute(u128::funnel_shl(transmute(self), transmute(b), C * 8)) } #[inline] #[target_feature(enable = "vector")] unsafe fn vec_sldw<const C: u32>(self, b: Self) -> Self { static_assert_uimm_bits!(C, 2); - transmute(vsldb(transmute(self), transmute(b), const { 4 * C })) + transmute(u128::funnel_shl(transmute(self), transmute(b), C * 4 * 8)) } #[inline] #[target_feature(enable = "vector-enhancements-2")] unsafe fn vec_sldb<const C: u32>(self, b: Self) -> Self { static_assert_uimm_bits!(C, 3); - transmute(vsld(transmute(self), transmute(b), C)) + transmute(u128::funnel_shl(transmute(self), transmute(b), C)) } } @@ -3521,6 +3547,11 @@ mod sealed { unsafe fn vec_srdb<const C: u32>(self, b: Self) -> Self { static_assert_uimm_bits!(C, 3); transmute(vsrd(transmute(self), transmute(b), C)) + // FIXME(llvm): https://github.com/llvm/llvm-project/issues/129955#issuecomment-3207488190 + // LLVM currently rewrites `fshr` to `fshl`, and the logic in the s390x + // backend cannot deal with that yet. 
+ // #[link_name = "llvm.fshr.i128"] fn fshr_i128(a: u128, b: u128, c: u128) -> u128; + // transmute(fshr_i128(transmute(self), transmute(b), const { C as u128 })) } } )* @@ -4676,11 +4707,9 @@ pub unsafe fn vec_subc_u128( a: vector_unsigned_char, b: vector_unsigned_char, ) -> vector_unsigned_char { - // FIXME(llvm) sadly this does not work https://github.com/llvm/llvm-project/issues/129608 - // let a: u128 = transmute(a); - // let b: u128 = transmute(b); - // transmute(!a.overflowing_sub(b).1 as u128) - transmute(vscbiq(transmute(a), transmute(b))) + let a: u128 = transmute(a); + let b: u128 = transmute(b); + transmute(!a.overflowing_sub(b).1 as u128) } /// Vector Add Compute Carryout unsigned 128-bits @@ -4694,7 +4723,9 @@ pub unsafe fn vec_addc_u128( ) -> vector_unsigned_char { let a: u128 = transmute(a); let b: u128 = transmute(b); - transmute(a.overflowing_add(b).1 as u128) + // FIXME(llvm) https://github.com/llvm/llvm-project/pull/153557 + // transmute(a.overflowing_add(b).1 as u128) + transmute(vaccq(a, b)) } /// Vector Add With Carry unsigned 128-bits @@ -4710,7 +4741,7 @@ pub unsafe fn vec_adde_u128( let a: u128 = transmute(a); let b: u128 = transmute(b); let c: u128 = transmute(c); - // FIXME(llvm) sadly this does not work + // FIXME(llvm) https://github.com/llvm/llvm-project/pull/153557 // let (d, _carry) = a.carrying_add(b, c & 1 != 0); // transmute(d) transmute(vacq(a, b, c)) } /// Vector Add With Carry Compute Carry unsigned 128-bits @@ -4729,8 +4760,10 @@ pub unsafe fn vec_addec_u128( let a: u128 = transmute(a); let b: u128 = transmute(b); let c: u128 = transmute(c); - let (_d, carry) = a.carrying_add(b, c & 1 != 0); - transmute(carry as u128) + // FIXME(llvm) https://github.com/llvm/llvm-project/pull/153557 + // let (_d, carry) = a.carrying_add(b, c & 1 != 0); + // transmute(carry as u128) + transmute(vacccq(a, b, c)) } /// Vector Subtract with Carryout diff --git a/library/stdarch/crates/core_arch/src/wasm32/simd128.rs b/library/stdarch/crates/core_arch/src/wasm32/simd128.rs index 108bc3125c5f3..c864d6a516e08 100644 --- a/library/stdarch/crates/core_arch/src/wasm32/simd128.rs +++ b/library/stdarch/crates/core_arch/src/wasm32/simd128.rs @@ -141,16 +141,6 @@ unsafe extern "unadjusted" { fn llvm_f64x2_max(x: simd::f64x2, y: simd::f64x2) -> simd::f64x2; } -#[repr(C, packed)] -#[derive(Copy)] -struct Unaligned<T>(T); - -impl<T: Copy> Clone for Unaligned<T> { - fn clone(&self) -> Unaligned<T> { - *self - } -} - /// Loads a `v128` vector from the given heap address. /// /// This intrinsic will emit a load with an alignment of 1. 
While this is @@ -179,7 +169,7 @@ impl<T: Copy> Clone for Unaligned<T> { #[doc(alias("v128.load"))] #[stable(feature = "wasm_simd", since = "1.54.0")] pub unsafe fn v128_load(m: *const v128) -> v128 { - (*(m as *const Unaligned<v128>)).0 + m.read_unaligned() } /// Load eight 8-bit integers and sign extend each one to a 16-bit lane @@ -196,8 +186,8 @@ pub unsafe fn v128_load(m: *const v128) -> v128 { #[doc(alias("v128.load8x8_s"))] #[stable(feature = "wasm_simd", since = "1.54.0")] pub unsafe fn i16x8_load_extend_i8x8(m: *const i8) -> v128 { - let m = *(m as *const Unaligned<simd::i8x8>); - simd_cast::<_, simd::i16x8>(m.0).v128() + let m = m.cast::<simd::i8x8>().read_unaligned(); + simd_cast::<_, simd::i16x8>(m).v128() } /// Load eight 8-bit integers and zero extend each one to a 16-bit lane @@ -214,8 +204,8 @@ pub unsafe fn i16x8_load_extend_i8x8(m: *const i8) -> v128 { #[doc(alias("v128.load8x8_u"))] #[stable(feature = "wasm_simd", since = "1.54.0")] pub unsafe fn i16x8_load_extend_u8x8(m: *const u8) -> v128 { - let m = *(m as *const Unaligned<simd::u8x8>); - simd_cast::<_, simd::u16x8>(m.0).v128() + let m = m.cast::<simd::u8x8>().read_unaligned(); + simd_cast::<_, simd::u16x8>(m).v128() } #[stable(feature = "wasm_simd", since = "1.54.0")] @@ -235,8 +225,8 @@ pub use i16x8_load_extend_u8x8 as u16x8_load_extend_u8x8; #[doc(alias("v128.load16x4_s"))] #[stable(feature = "wasm_simd", since = "1.54.0")] pub unsafe fn i32x4_load_extend_i16x4(m: *const i16) -> v128 { - let m = *(m as *const Unaligned<simd::i16x4>); - simd_cast::<_, simd::i32x4>(m.0).v128() + let m = m.cast::<simd::i16x4>().read_unaligned(); + simd_cast::<_, simd::i32x4>(m).v128() } /// Load four 16-bit integers and zero extend each one to a 32-bit lane @@ -253,8 +243,8 @@ pub unsafe fn i32x4_load_extend_i16x4(m: *const i16) -> v128 { #[doc(alias("v128.load16x4_u"))] #[stable(feature = "wasm_simd", since = "1.54.0")] pub unsafe fn i32x4_load_extend_u16x4(m: *const u16) -> v128 { - let m = *(m as *const Unaligned<simd::u16x4>); - simd_cast::<_, simd::u32x4>(m.0).v128() + let m = m.cast::<simd::u16x4>().read_unaligned(); + simd_cast::<_, simd::u32x4>(m).v128() } #[stable(feature = "wasm_simd", since = "1.54.0")] @@ -274,8 +264,8 @@ pub use i32x4_load_extend_u16x4 as u32x4_load_extend_u16x4; #[doc(alias("v128.load32x2_s"))] #[stable(feature = "wasm_simd", since = "1.54.0")] pub unsafe fn i64x2_load_extend_i32x2(m: *const i32) -> v128 { - let m = *(m as *const Unaligned<simd::i32x2>); - simd_cast::<_, simd::i64x2>(m.0).v128() + let m = m.cast::<simd::i32x2>().read_unaligned(); + simd_cast::<_, simd::i64x2>(m).v128() } /// Load two 32-bit integers and zero extend each one to a 64-bit lane @@ -292,8 +282,8 @@ pub unsafe fn i64x2_load_extend_i32x2(m: *const i32) -> v128 { #[doc(alias("v128.load32x2_u"))] #[stable(feature = "wasm_simd", since = "1.54.0")] pub unsafe fn i64x2_load_extend_u32x2(m: *const u32) -> v128 { - let m = *(m as *const Unaligned<simd::u32x2>); - simd_cast::<_, simd::u64x2>(m.0).v128() + let m = m.cast::<simd::u32x2>().read_unaligned(); + simd_cast::<_, simd::u64x2>(m).v128() } #[stable(feature = "wasm_simd", since = "1.54.0")] @@ -453,7 +443,7 @@ pub unsafe fn v128_load64_zero(m: *const u64) -> v128 { #[doc(alias("v128.store"))] #[stable(feature = "wasm_simd", since = "1.54.0")] pub unsafe fn v128_store(m: *mut v128, a: v128) { - *(m as *mut Unaligned<v128>) = Unaligned(a); + m.write_unaligned(a) } /// Loads an 8-bit value from `m` and sets lane `L` of `v` to that value. 
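The wasm32 hunks above replace the `#[repr(C, packed)] Unaligned<T>` wrapper with direct `read_unaligned`/`write_unaligned` calls on raw pointers. The standalone sketch below shows why the two patterns are interchangeable; the `Block16` type and the function names are illustrative assumptions only, not part of this patch:

    use core::ptr;

    // Stand-in payload with alignment 4, so an odd address really is misaligned.
    #[derive(Clone, Copy, PartialEq, Debug)]
    struct Block16([u32; 4]);

    // Old pattern: a packed wrapper has alignment 1, so dereferencing
    // `*const Unaligned<T>` and copying the field out is an unaligned load.
    #[repr(C, packed)]
    #[derive(Copy)]
    struct Unaligned<T>(T);
    impl<T: Copy> Clone for Unaligned<T> {
        fn clone(&self) -> Unaligned<T> {
            *self
        }
    }

    unsafe fn load_via_packed(m: *const Block16) -> Block16 {
        unsafe { (*(m as *const Unaligned<Block16>)).0 }
    }

    // New pattern: express the same access directly on the raw pointer.
    unsafe fn load_via_read_unaligned(m: *const Block16) -> Block16 {
        unsafe { m.read_unaligned() }
    }

    fn main() {
        // 17 bytes so that an offset of 1 stays in bounds for a 16-byte read.
        let storage = [7u8; 17];
        let m = unsafe { storage.as_ptr().add(1) } as *const Block16;
        let a = unsafe { load_via_packed(m) };
        let b = unsafe { load_via_read_unaligned(m) };
        assert_eq!(a, b);

        // The store side is the same story with `write_unaligned`.
        let mut out = [0u8; 17];
        unsafe { ptr::write_unaligned(out.as_mut_ptr().add(1) as *mut Block16, b) };
        assert_eq!(&out[1..], &storage[1..]);
    }

Either form performs a 16-byte access with no alignment requirement; the pointer-method form simply avoids the helper type, which is why the patch can delete `Unaligned<T>` outright.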
diff --git a/library/stdarch/crates/core_arch/src/x86/avx512f.rs b/library/stdarch/crates/core_arch/src/x86/avx512f.rs index d53f83c0a10bc..52c6a11a43f0e 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx512f.rs +++ b/library/stdarch/crates/core_arch/src/x86/avx512f.rs @@ -5823,7 +5823,7 @@ pub fn _mm512_maskz_roundscale_pd(k: __mmask8, a: __m512d) -> _ #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -#[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))] +#[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 16))] #[rustc_legacy_const_generics(1)] pub fn _mm256_roundscale_pd(a: __m256d) -> __m256d { unsafe { @@ -5897,7 +5897,7 @@ pub fn _mm256_maskz_roundscale_pd(k: __mmask8, a: __m256d) -> _ #[inline] #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -#[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))] +#[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 16))] #[rustc_legacy_const_generics(1)] pub fn _mm_roundscale_pd(a: __m128d) -> __m128d { unsafe { diff --git a/library/stdarch/crates/core_arch/src/x86/avx512fp16.rs b/library/stdarch/crates/core_arch/src/x86/avx512fp16.rs index 8c914803c665d..a86fc7199b83c 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx512fp16.rs +++ b/library/stdarch/crates/core_arch/src/x86/avx512fp16.rs @@ -17282,14 +17282,14 @@ mod tests { assert_eq_m512h(a, b); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_load_sh() { let a = _mm_set_sh(1.0); let b = _mm_load_sh(addr_of!(a).cast()); assert_eq_m128h(a, b); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_load_sh() { let a = _mm_set_sh(1.0); let src = _mm_set_sh(2.); @@ -17299,7 +17299,7 @@ mod tests { assert_eq_m128h(src, b); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_load_sh() { let a = _mm_set_sh(1.0); let b = _mm_maskz_load_sh(1, addr_of!(a).cast()); @@ -17344,7 +17344,7 @@ mod tests { assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_move_sh() { let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let b = _mm_set_sh(9.0); @@ -17353,7 +17353,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_move_sh() { let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let b = _mm_set_sh(9.0); @@ -17363,7 +17363,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_move_sh() { let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let b = _mm_set_sh(9.0); @@ -17402,7 +17402,7 @@ mod tests { assert_eq_m512h(a, b); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_store_sh() { let a = _mm_set_sh(1.0); let mut b = _mm_setzero_ph(); @@ -17410,7 +17410,7 @@ mod tests { assert_eq_m128h(a, b); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_store_sh() { let a = _mm_set_sh(1.0); let mut b = _mm_setzero_ph(); @@ -17655,7 +17655,7 @@ mod tests { assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_add_round_sh() { let a = _mm_set_sh(1.0); let 
b = _mm_set_sh(2.0); @@ -17664,7 +17664,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_add_round_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); @@ -17681,7 +17681,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_add_round_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); @@ -17695,7 +17695,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_add_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); @@ -17704,7 +17704,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_add_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); @@ -17717,7 +17717,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_add_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); @@ -17945,7 +17945,7 @@ mod tests { assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_sub_round_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); @@ -17954,7 +17954,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_sub_round_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); @@ -17971,7 +17971,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_sub_round_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); @@ -17985,7 +17985,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_sub_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); @@ -17994,7 +17994,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_sub_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); @@ -18007,7 +18007,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_sub_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); @@ -18235,7 +18235,7 @@ mod tests { assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mul_round_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); @@ -18244,7 +18244,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_mul_round_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); @@ -18261,7 +18261,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_mul_round_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); @@ -18275,7 +18275,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mul_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); @@ -18284,7 +18284,7 @@ mod tests { assert_eq_m128h(r, 
e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_mul_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); @@ -18297,7 +18297,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_mul_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); @@ -18457,7 +18457,7 @@ mod tests { assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_div_round_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); @@ -18466,7 +18466,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_div_round_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); @@ -18483,7 +18483,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_div_round_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); @@ -18497,7 +18497,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_div_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); @@ -18506,7 +18506,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_div_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); @@ -18519,7 +18519,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_div_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); @@ -18680,7 +18680,7 @@ mod tests { assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mul_round_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); @@ -18689,7 +18689,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_mul_round_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); @@ -18701,7 +18701,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_mul_round_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); @@ -18711,7 +18711,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mul_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); @@ -18720,7 +18720,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_mul_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); @@ -18730,7 +18730,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_mul_sch() { let a = _mm_setr_ph(0.0, 1.0, 
2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); @@ -18888,7 +18888,7 @@ mod tests { assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_fmul_round_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); @@ -18897,7 +18897,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_fmul_round_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); @@ -18909,7 +18909,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_fmul_round_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); @@ -18919,7 +18919,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_fmul_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); @@ -18928,7 +18928,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_fmul_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); @@ -18938,7 +18938,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_fmul_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); @@ -19096,7 +19096,7 @@ mod tests { assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_cmul_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); @@ -19105,7 +19105,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_cmul_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); @@ -19115,7 +19115,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_cmul_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); @@ -19124,7 +19124,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_cmul_round_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); @@ -19133,7 +19133,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_cmul_round_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); @@ -19145,7 +19145,7 @@ mod tests { 
assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_cmul_round_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); @@ -19304,7 +19304,7 @@ mod tests { assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_fcmul_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); @@ -19313,7 +19313,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_fcmul_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); @@ -19323,7 +19323,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_fcmul_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); @@ -19332,7 +19332,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_fcmul_round_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); @@ -19341,7 +19341,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_fcmul_round_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); @@ -19353,7 +19353,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_fcmul_round_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); @@ -19692,7 +19692,7 @@ mod tests { assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_fmadd_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); @@ -19702,7 +19702,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_fmadd_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); @@ -19715,7 +19715,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask3_fmadd_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); @@ -19728,7 +19728,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_fmadd_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); @@ -19741,7 +19741,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] 
unsafe fn test_mm_fmadd_round_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); @@ -19751,7 +19751,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_fmadd_round_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); @@ -19768,7 +19768,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask3_fmadd_round_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); @@ -19785,7 +19785,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_fmadd_round_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); @@ -20002,7 +20002,7 @@ mod tests { assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_fcmadd_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); @@ -20012,7 +20012,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_fcmadd_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); @@ -20025,7 +20025,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask3_fcmadd_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); @@ -20038,7 +20038,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_fcmadd_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); @@ -20051,7 +20051,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_fcmadd_round_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); @@ -20061,7 +20061,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_fcmadd_round_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); @@ -20078,7 +20078,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask3_fcmadd_round_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); @@ -20095,7 +20095,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_fcmadd_round_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); let b = 
_mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); @@ -20311,7 +20311,7 @@ mod tests { assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_fmadd_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); @@ -20321,7 +20321,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_fmadd_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); @@ -20334,7 +20334,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask3_fmadd_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); @@ -20347,7 +20347,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_fmadd_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); @@ -20360,7 +20360,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_fmadd_round_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); @@ -20370,7 +20370,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_fmadd_round_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); @@ -20387,7 +20387,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask3_fmadd_round_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); @@ -20404,7 +20404,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_fmadd_round_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); @@ -20620,7 +20620,7 @@ mod tests { assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_fmsub_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); @@ -20630,7 +20630,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_fmsub_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); @@ -20643,7 +20643,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask3_fmsub_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); @@ -20656,7 +20656,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = 
"avx512fp16,avx512vl")] unsafe fn test_mm_maskz_fmsub_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); @@ -20669,7 +20669,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_fmsub_round_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); @@ -20930,7 +20930,7 @@ mod tests { assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_fnmadd_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); @@ -20940,7 +20940,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_fnmadd_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); @@ -20953,7 +20953,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask3_fnmadd_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); @@ -20966,7 +20966,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_fnmadd_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); @@ -20979,7 +20979,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_fnmadd_round_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); @@ -20989,7 +20989,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_fnmadd_round_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); @@ -21006,7 +21006,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask3_fnmadd_round_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); @@ -21023,7 +21023,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_fnmadd_round_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); @@ -21240,7 +21240,7 @@ mod tests { assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_fnmsub_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); @@ -21250,7 +21250,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_fnmsub_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); @@ 
-21263,7 +21263,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask3_fnmsub_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); @@ -21276,7 +21276,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_fnmsub_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); @@ -21289,7 +21289,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_fnmsub_round_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); @@ -21299,7 +21299,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_fnmsub_round_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); @@ -21316,7 +21316,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask3_fnmsub_round_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); @@ -21333,7 +21333,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_fnmsub_round_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); @@ -21851,7 +21851,7 @@ mod tests { assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_rcp_sh() { let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); @@ -21860,7 +21860,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_rcp_sh() { let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); @@ -21873,7 +21873,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_rcp_sh() { let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); @@ -21970,7 +21970,7 @@ mod tests { assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_rsqrt_sh() { let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); @@ -21979,7 +21979,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_rsqrt_sh() { let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); @@ -21992,7 +21992,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = 
"avx512fp16,avx512vl")] unsafe fn test_mm_maskz_rsqrt_sh() { let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); @@ -22127,7 +22127,7 @@ mod tests { assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_sqrt_sh() { let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); @@ -22136,7 +22136,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_sqrt_sh() { let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); @@ -22149,7 +22149,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_sqrt_sh() { let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); @@ -22161,7 +22161,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_sqrt_round_sh() { let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); @@ -22170,7 +22170,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_sqrt_round_sh() { let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); @@ -22187,7 +22187,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_sqrt_round_sh() { let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); @@ -22338,7 +22338,7 @@ mod tests { assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_max_sh() { let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); @@ -22347,7 +22347,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_max_sh() { let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); @@ -22360,7 +22360,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_max_sh() { let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); @@ -22372,7 +22372,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_max_round_sh() { let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); @@ -22381,7 +22381,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_max_round_sh() 
{ let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); @@ -22398,7 +22398,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_max_round_sh() { let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); @@ -22549,7 +22549,7 @@ mod tests { assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_min_sh() { let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); @@ -22558,7 +22558,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_min_sh() { let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); @@ -22571,7 +22571,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_min_sh() { let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); @@ -22583,7 +22583,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_min_round_sh() { let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); @@ -22592,7 +22592,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_min_round_sh() { let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); @@ -22609,7 +22609,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_min_round_sh() { let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); @@ -22746,7 +22746,7 @@ mod tests { assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_getexp_sh() { let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); @@ -22755,7 +22755,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_getexp_sh() { let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); @@ -22768,7 +22768,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_getexp_sh() { let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); @@ -22780,7 +22780,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_getexp_round_sh() { let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(3.0, 20., 
21., 22., 23., 24., 25., 26.); @@ -22789,7 +22789,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_getexp_round_sh() { let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); @@ -22802,7 +22802,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_getexp_round_sh() { let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); @@ -22958,7 +22958,7 @@ mod tests { assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_getmant_sh() { let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); @@ -22967,7 +22967,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_getmant_sh() { let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); @@ -22980,7 +22980,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_getmant_sh() { let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); @@ -22992,7 +22992,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_getmant_round_sh() { let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); @@ -23003,7 +23003,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_getmant_round_sh() { let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); @@ -23024,7 +23024,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_getmant_round_sh() { let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); @@ -23167,7 +23167,7 @@ mod tests { assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_roundscale_sh() { let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); @@ -23176,7 +23176,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_roundscale_sh() { let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); @@ -23189,7 +23189,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_roundscale_sh() { let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); @@ -23201,7 +23201,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable 
= "avx512fp16,avx512vl")] unsafe fn test_mm_roundscale_round_sh() { let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); @@ -23210,7 +23210,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_roundscale_round_sh() { let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); @@ -23223,7 +23223,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_roundscale_round_sh() { let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); @@ -23372,7 +23372,7 @@ mod tests { assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_scalef_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); @@ -23381,7 +23381,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_scalef_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); @@ -23394,7 +23394,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_scalef_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); @@ -23406,7 +23406,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_scalef_round_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); @@ -23415,7 +23415,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_scalef_round_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); @@ -23432,7 +23432,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_scalef_round_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); @@ -23576,7 +23576,7 @@ mod tests { assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_reduce_sh() { let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); @@ -23585,7 +23585,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_reduce_sh() { let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); @@ -23598,7 +23598,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_reduce_sh() { let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(1.25, 20., 21., 22., 
23., 24., 25., 26.); @@ -23610,7 +23610,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_reduce_round_sh() { let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); @@ -23619,7 +23619,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_reduce_round_sh() { let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); @@ -23636,7 +23636,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_reduce_round_sh() { let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); @@ -24505,7 +24505,7 @@ mod tests { assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_cvti32_sh() { let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let r = _mm_cvti32_sh(a, 10); @@ -24513,7 +24513,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_cvt_roundi32_sh() { let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let r = _mm_cvt_roundi32_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, 10); @@ -24645,7 +24645,7 @@ mod tests { assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_cvtu32_sh() { let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let r = _mm_cvtu32_sh(a, 10); @@ -24653,7 +24653,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_cvt_roundu32_sh() { let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let r = _mm_cvt_roundu32_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, 10); @@ -24711,7 +24711,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm512_cvtepi64_ph() { let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); let r = _mm512_cvtepi64_ph(a); @@ -24719,7 +24719,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm512_mask_cvtepi64_ph() { let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); @@ -24728,7 +24728,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm512_maskz_cvtepi64_ph() { let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); let r = _mm512_maskz_cvtepi64_ph(0b01010101, a); @@ -24736,7 +24736,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm512_cvt_roundepi64_ph() { let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); let r = _mm512_cvt_roundepi64_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); @@ -24755,7 +24755,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm512_maskz_cvt_roundepi64_ph() { let a = 
_mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); let r = _mm512_maskz_cvt_roundepi64_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( @@ -24815,7 +24815,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm512_cvtepu64_ph() { let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); let r = _mm512_cvtepu64_ph(a); @@ -24823,7 +24823,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm512_mask_cvtepu64_ph() { let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); @@ -24832,7 +24832,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm512_maskz_cvtepu64_ph() { let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); let r = _mm512_maskz_cvtepu64_ph(0b01010101, a); @@ -24840,7 +24840,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm512_cvt_roundepu64_ph() { let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); let r = _mm512_cvt_roundepu64_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); @@ -24848,7 +24848,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm512_mask_cvt_roundepu64_ph() { let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); @@ -24859,7 +24859,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm512_maskz_cvt_roundepu64_ph() { let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); let r = _mm512_maskz_cvt_roundepu64_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( @@ -25005,7 +25005,7 @@ mod tests { assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_cvtss_sh() { let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); let b = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); @@ -25014,7 +25014,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_cvtss_sh() { let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); let b = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); @@ -25027,7 +25027,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_cvtss_sh() { let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); let b = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); @@ -25039,7 +25039,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_cvt_roundss_sh() { let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); let b = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); @@ -25048,7 +25048,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_cvt_roundss_sh() { let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); let b = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); @@ -25065,7 +25065,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn 
test_mm_maskz_cvt_roundss_sh() { let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); let b = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); @@ -25129,7 +25129,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm512_cvtpd_ph() { let a = _mm512_set_pd(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let r = _mm512_cvtpd_ph(a); @@ -25137,7 +25137,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm512_mask_cvtpd_ph() { let a = _mm512_set_pd(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); @@ -25146,7 +25146,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm512_maskz_cvtpd_ph() { let a = _mm512_set_pd(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let r = _mm512_maskz_cvtpd_ph(0b01010101, a); @@ -25154,7 +25154,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm512_cvt_roundpd_ph() { let a = _mm512_set_pd(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let r = _mm512_cvt_roundpd_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); @@ -25162,7 +25162,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm512_mask_cvt_roundpd_ph() { let a = _mm512_set_pd(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); @@ -25173,7 +25173,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm512_maskz_cvt_roundpd_ph() { let a = _mm512_set_pd(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let r = _mm512_maskz_cvt_roundpd_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( @@ -25183,7 +25183,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_cvtsd_sh() { let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); let b = _mm_setr_pd(1.0, 2.0); @@ -25192,7 +25192,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_cvtsd_sh() { let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); let b = _mm_setr_pd(1.0, 2.0); @@ -25205,7 +25205,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_maskz_cvtsd_sh() { let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); let b = _mm_setr_pd(1.0, 2.0); @@ -25217,7 +25217,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_cvt_roundsd_sh() { let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); let b = _mm_setr_pd(1.0, 2.0); @@ -25226,7 +25226,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn test_mm_mask_cvt_roundsd_sh() { let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); let b = _mm_setr_pd(1.0, 2.0); @@ -25243,7 +25243,7 @@ mod tests { assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] + #[simd_test(enable = "avx512fp16,avx512vl")] unsafe fn 
test_mm_maskz_cvt_roundsd_sh() { let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); let b = _mm_setr_pd(1.0, 2.0); diff --git a/library/stdarch/crates/intrinsic-test/src/arm/config.rs b/library/stdarch/crates/intrinsic-test/src/arm/config.rs index 9a7b37253d1cb..72e997de154ab 100644 --- a/library/stdarch/crates/intrinsic-test/src/arm/config.rs +++ b/library/stdarch/crates/intrinsic-test/src/arm/config.rs @@ -1,14 +1,17 @@ -pub fn build_notices(line_prefix: &str) -> String { - format!( - "\ -{line_prefix}This is a transient test file, not intended for distribution. Some aspects of the -{line_prefix}test are derived from a JSON specification, published under the same license as the -{line_prefix}`intrinsic-test` crate.\n -" - ) -} +pub const NOTICE: &str = "\ +// This is a transient test file, not intended for distribution. Some aspects of the +// test are derived from a JSON specification, published under the same license as the +// `intrinsic-test` crate.\n"; + +pub const POLY128_OSTREAM_DECL: &str = r#" +#ifdef __aarch64__ +std::ostream& operator<<(std::ostream& os, poly128_t value); +#endif +"#; -pub const POLY128_OSTREAM_DEF: &str = r#"std::ostream& operator<<(std::ostream& os, poly128_t value) { +pub const POLY128_OSTREAM_DEF: &str = r#" +#ifdef __aarch64__ +std::ostream& operator<<(std::ostream& os, poly128_t value) { std::stringstream temp; do { int n = value % 10; @@ -19,7 +22,9 @@ pub const POLY128_OSTREAM_DEF: &str = r#"std::ostream& operator<<(std::ostream& std::string res(tempstr.rbegin(), tempstr.rend()); os << res; return os; -}"#; +} +#endif +"#; // Format f16 values (and vectors containing them) in a way that is consistent with C. pub const F16_FORMATTING_DEF: &str = r#" @@ -118,4 +123,10 @@ pub const AARCH_CONFIGURATIONS: &str = r#" #![cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), feature(stdarch_neon_ftts))] #![feature(fmt_helpers_for_derive)] #![feature(stdarch_neon_f16)] + +#[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] +use core::arch::aarch64::*; + +#[cfg(target_arch = "arm")] +use core::arch::arm::*; "#; diff --git a/library/stdarch/crates/intrinsic-test/src/arm/intrinsic.rs b/library/stdarch/crates/intrinsic-test/src/arm/intrinsic.rs index fd93eff76e09c..29343bee4c300 100644 --- a/library/stdarch/crates/intrinsic-test/src/arm/intrinsic.rs +++ b/library/stdarch/crates/intrinsic-test/src/arm/intrinsic.rs @@ -1,7 +1,4 @@ -use crate::common::argument::ArgumentList; -use crate::common::indentation::Indentation; -use crate::common::intrinsic::{Intrinsic, IntrinsicDefinition}; -use crate::common::intrinsic_helpers::{IntrinsicType, IntrinsicTypeDefinition, Sign, TypeKind}; +use crate::common::intrinsic_helpers::IntrinsicType; use std::ops::{Deref, DerefMut}; #[derive(Debug, Clone, PartialEq)] @@ -23,83 +20,3 @@ impl DerefMut for ArmIntrinsicType { &mut self.data } } - -impl IntrinsicDefinition for Intrinsic { - fn arguments(&self) -> ArgumentList { - self.arguments.clone() - } - - fn results(&self) -> ArmIntrinsicType { - self.results.clone() - } - - fn name(&self) -> String { - self.name.clone() - } - - /// Generates a std::cout for the intrinsics results that will match the - /// rust debug output format for the return type. The generated line assumes - /// there is an int i in scope which is the current pass number. 
- fn print_result_c(&self, indentation: Indentation, additional: &str) -> String { - let lanes = if self.results().num_vectors() > 1 { - (0..self.results().num_vectors()) - .map(|vector| { - format!( - r#""{ty}(" << {lanes} << ")""#, - ty = self.results().c_single_vector_type(), - lanes = (0..self.results().num_lanes()) - .map(move |idx| -> std::string::String { - format!( - "{cast}{lane_fn}(__return_value.val[{vector}], {lane})", - cast = self.results().c_promotion(), - lane_fn = self.results().get_lane_function(), - lane = idx, - vector = vector, - ) - }) - .collect::>() - .join(r#" << ", " << "#) - ) - }) - .collect::>() - .join(r#" << ", " << "#) - } else if self.results().num_lanes() > 1 { - (0..self.results().num_lanes()) - .map(|idx| -> std::string::String { - format!( - "{cast}{lane_fn}(__return_value, {lane})", - cast = self.results().c_promotion(), - lane_fn = self.results().get_lane_function(), - lane = idx - ) - }) - .collect::>() - .join(r#" << ", " << "#) - } else { - format!( - "{promote}cast<{cast}>(__return_value)", - cast = match self.results.kind() { - TypeKind::Float if self.results().inner_size() == 16 => "float16_t".to_string(), - TypeKind::Float if self.results().inner_size() == 32 => "float".to_string(), - TypeKind::Float if self.results().inner_size() == 64 => "double".to_string(), - TypeKind::Int(Sign::Signed) => format!("int{}_t", self.results().inner_size()), - TypeKind::Int(Sign::Unsigned) => - format!("uint{}_t", self.results().inner_size()), - TypeKind::Poly => format!("poly{}_t", self.results().inner_size()), - ty => todo!("print_result_c - Unknown type: {:#?}", ty), - }, - promote = self.results().c_promotion(), - ) - }; - - format!( - r#"{indentation}std::cout << "Result {additional}-" << i+1 << ": {ty}" << std::fixed << std::setprecision(150) << {lanes} << "{close}" << std::endl;"#, - ty = if self.results().is_simd() { - format!("{}(", self.results().c_type()) - } else { - String::from("") - }, - close = if self.results.is_simd() { ")" } else { "" }, - ) - } -} diff --git a/library/stdarch/crates/intrinsic-test/src/arm/json_parser.rs b/library/stdarch/crates/intrinsic-test/src/arm/json_parser.rs index b019abab21346..65c179ef0d083 100644 --- a/library/stdarch/crates/intrinsic-test/src/arm/json_parser.rs +++ b/library/stdarch/crates/intrinsic-test/src/arm/json_parser.rs @@ -113,7 +113,7 @@ fn json_to_intrinsic( Ok(Intrinsic { name, arguments, - results: results, + results, arch_tags: intr.architectures, }) } diff --git a/library/stdarch/crates/intrinsic-test/src/arm/mod.rs b/library/stdarch/crates/intrinsic-test/src/arm/mod.rs index 51f5ac4283783..08dc2d38702cd 100644 --- a/library/stdarch/crates/intrinsic-test/src/arm/mod.rs +++ b/library/stdarch/crates/intrinsic-test/src/arm/mod.rs @@ -5,20 +5,11 @@ mod intrinsic; mod json_parser; mod types; -use std::fs::{self, File}; - -use rayon::prelude::*; - +use crate::common::SupportedArchitectureTest; use crate::common::cli::ProcessedCli; -use crate::common::compare::compare_outputs; -use crate::common::gen_c::{write_main_cpp, write_mod_cpp}; -use crate::common::gen_rust::{ - compile_rust_programs, write_bin_cargo_toml, write_lib_cargo_toml, write_lib_rs, write_main_rs, -}; +use crate::common::compile_c::CppCompilation; use crate::common::intrinsic::Intrinsic; use crate::common::intrinsic_helpers::TypeKind; -use crate::common::{SupportedArchitectureTest, chunk_info}; -use config::{AARCH_CONFIGURATIONS, F16_FORMATTING_DEF, POLY128_OSTREAM_DEF, build_notices}; use intrinsic::ArmIntrinsicType; use 
json_parser::get_neon_intrinsics; @@ -28,7 +19,30 @@ pub struct ArmArchitectureTest { } impl SupportedArchitectureTest for ArmArchitectureTest { - fn create(cli_options: ProcessedCli) -> Box { + type IntrinsicImpl = ArmIntrinsicType; + + fn cli_options(&self) -> &ProcessedCli { + &self.cli_options + } + + fn intrinsics(&self) -> &[Intrinsic] { + &self.intrinsics + } + + const NOTICE: &str = config::NOTICE; + + const PLATFORM_C_HEADERS: &[&str] = &["arm_neon.h", "arm_acle.h", "arm_fp16.h"]; + const PLATFORM_C_DEFINITIONS: &str = config::POLY128_OSTREAM_DEF; + const PLATFORM_C_FORWARD_DECLARATIONS: &str = config::POLY128_OSTREAM_DECL; + + const PLATFORM_RUST_DEFINITIONS: &str = config::F16_FORMATTING_DEF; + const PLATFORM_RUST_CFGS: &str = config::AARCH_CONFIGURATIONS; + + fn cpp_compilation(&self) -> Option { + compile::build_cpp_compilation(&self.cli_options) + } + + fn create(cli_options: ProcessedCli) -> Self { let a32 = cli_options.target.contains("v7"); let mut intrinsics = get_neon_intrinsics(&cli_options.filename, &cli_options.target) .expect("Error parsing input file"); @@ -50,149 +64,9 @@ impl SupportedArchitectureTest for ArmArchitectureTest { .collect::>(); intrinsics.dedup(); - Box::new(Self { + Self { intrinsics, cli_options, - }) - } - - fn build_c_file(&self) -> bool { - let c_target = "aarch64"; - let platform_headers = &["arm_neon.h", "arm_acle.h", "arm_fp16.h"]; - - let (chunk_size, chunk_count) = chunk_info(self.intrinsics.len()); - - let cpp_compiler_wrapped = compile::build_cpp_compilation(&self.cli_options); - - let notice = &build_notices("// "); - fs::create_dir_all("c_programs").unwrap(); - self.intrinsics - .par_chunks(chunk_size) - .enumerate() - .map(|(i, chunk)| { - let c_filename = format!("c_programs/mod_{i}.cpp"); - let mut file = File::create(&c_filename).unwrap(); - write_mod_cpp(&mut file, notice, c_target, platform_headers, chunk).unwrap(); - - // compile this cpp file into a .o file. 
- // - // This is done because `cpp_compiler_wrapped` is None when - // the --generate-only flag is passed - if let Some(cpp_compiler) = cpp_compiler_wrapped.as_ref() { - let output = cpp_compiler - .compile_object_file(&format!("mod_{i}.cpp"), &format!("mod_{i}.o"))?; - assert!(output.status.success(), "{output:?}"); - } - - Ok(()) - }) - .collect::>() - .unwrap(); - - let mut file = File::create("c_programs/main.cpp").unwrap(); - write_main_cpp( - &mut file, - c_target, - POLY128_OSTREAM_DEF, - self.intrinsics.iter().map(|i| i.name.as_str()), - ) - .unwrap(); - - // This is done because `cpp_compiler_wrapped` is None when - // the --generate-only flag is passed - if let Some(cpp_compiler) = cpp_compiler_wrapped.as_ref() { - // compile this cpp file into a .o file - info!("compiling main.cpp"); - let output = cpp_compiler - .compile_object_file("main.cpp", "intrinsic-test-programs.o") - .unwrap(); - assert!(output.status.success(), "{output:?}"); - - let object_files = (0..chunk_count) - .map(|i| format!("mod_{i}.o")) - .chain(["intrinsic-test-programs.o".to_owned()]); - - let output = cpp_compiler - .link_executable(object_files, "intrinsic-test-programs") - .unwrap(); - assert!(output.status.success(), "{output:?}"); - } - - true - } - - fn build_rust_file(&self) -> bool { - std::fs::create_dir_all("rust_programs/src").unwrap(); - - let architecture = if self.cli_options.target.contains("v7") { - "arm" - } else { - "aarch64" - }; - - let (chunk_size, chunk_count) = chunk_info(self.intrinsics.len()); - - let mut cargo = File::create("rust_programs/Cargo.toml").unwrap(); - write_bin_cargo_toml(&mut cargo, chunk_count).unwrap(); - - let mut main_rs = File::create("rust_programs/src/main.rs").unwrap(); - write_main_rs( - &mut main_rs, - chunk_count, - AARCH_CONFIGURATIONS, - "", - self.intrinsics.iter().map(|i| i.name.as_str()), - ) - .unwrap(); - - let target = &self.cli_options.target; - let toolchain = self.cli_options.toolchain.as_deref(); - let linker = self.cli_options.linker.as_deref(); - - let notice = &build_notices("// "); - self.intrinsics - .par_chunks(chunk_size) - .enumerate() - .map(|(i, chunk)| { - std::fs::create_dir_all(format!("rust_programs/mod_{i}/src"))?; - - let rust_filename = format!("rust_programs/mod_{i}/src/lib.rs"); - trace!("generating `{rust_filename}`"); - let mut file = File::create(rust_filename)?; - - let cfg = AARCH_CONFIGURATIONS; - let definitions = F16_FORMATTING_DEF; - write_lib_rs(&mut file, architecture, notice, cfg, definitions, chunk)?; - - let toml_filename = format!("rust_programs/mod_{i}/Cargo.toml"); - trace!("generating `{toml_filename}`"); - let mut file = File::create(toml_filename).unwrap(); - - write_lib_cargo_toml(&mut file, &format!("mod_{i}"))?; - - Ok(()) - }) - .collect::>() - .unwrap(); - - compile_rust_programs(toolchain, target, linker) - } - - fn compare_outputs(&self) -> bool { - if self.cli_options.toolchain.is_some() { - let intrinsics_name_list = self - .intrinsics - .iter() - .map(|i| i.name.clone()) - .collect::>(); - - compare_outputs( - &intrinsics_name_list, - &self.cli_options.runner, - &self.cli_options.target, - ) - } else { - true } } } diff --git a/library/stdarch/crates/intrinsic-test/src/arm/types.rs b/library/stdarch/crates/intrinsic-test/src/arm/types.rs index 32f8f106ce26d..e86a2c5189f0b 100644 --- a/library/stdarch/crates/intrinsic-test/src/arm/types.rs +++ b/library/stdarch/crates/intrinsic-test/src/arm/types.rs @@ -1,5 +1,6 @@ use super::intrinsic::ArmIntrinsicType; use crate::common::cli::Language; +use 
crate::common::indentation::Indentation; use crate::common::intrinsic_helpers::{IntrinsicType, IntrinsicTypeDefinition, Sign, TypeKind}; impl IntrinsicTypeDefinition for ArmIntrinsicType { @@ -98,6 +99,71 @@ impl IntrinsicTypeDefinition for ArmIntrinsicType { todo!("get_lane_function IntrinsicType: {:#?}", self) } } + + /// Generates a std::cout for the intrinsics results that will match the + /// rust debug output format for the return type. The generated line assumes + /// there is an int i in scope which is the current pass number. + fn print_result_c(&self, indentation: Indentation, additional: &str) -> String { + let lanes = if self.num_vectors() > 1 { + (0..self.num_vectors()) + .map(|vector| { + format!( + r#""{ty}(" << {lanes} << ")""#, + ty = self.c_single_vector_type(), + lanes = (0..self.num_lanes()) + .map(move |idx| -> std::string::String { + format!( + "{cast}{lane_fn}(__return_value.val[{vector}], {lane})", + cast = self.c_promotion(), + lane_fn = self.get_lane_function(), + lane = idx, + vector = vector, + ) + }) + .collect::>() + .join(r#" << ", " << "#) + ) + }) + .collect::>() + .join(r#" << ", " << "#) + } else if self.num_lanes() > 1 { + (0..self.num_lanes()) + .map(|idx| -> std::string::String { + format!( + "{cast}{lane_fn}(__return_value, {lane})", + cast = self.c_promotion(), + lane_fn = self.get_lane_function(), + lane = idx + ) + }) + .collect::>() + .join(r#" << ", " << "#) + } else { + format!( + "{promote}cast<{cast}>(__return_value)", + cast = match self.kind() { + TypeKind::Float if self.inner_size() == 16 => "float16_t".to_string(), + TypeKind::Float if self.inner_size() == 32 => "float".to_string(), + TypeKind::Float if self.inner_size() == 64 => "double".to_string(), + TypeKind::Int(Sign::Signed) => format!("int{}_t", self.inner_size()), + TypeKind::Int(Sign::Unsigned) => format!("uint{}_t", self.inner_size()), + TypeKind::Poly => format!("poly{}_t", self.inner_size()), + ty => todo!("print_result_c - Unknown type: {:#?}", ty), + }, + promote = self.c_promotion(), + ) + }; + + format!( + r#"{indentation}std::cout << "Result {additional}-" << i+1 << ": {ty}" << std::fixed << std::setprecision(150) << {lanes} << "{close}" << std::endl;"#, + ty = if self.is_simd() { + format!("{}(", self.c_type()) + } else { + String::from("") + }, + close = if self.is_simd() { ")" } else { "" }, + ) + } } impl ArmIntrinsicType { diff --git a/library/stdarch/crates/intrinsic-test/src/common/gen_c.rs b/library/stdarch/crates/intrinsic-test/src/common/gen_c.rs index 84755ce525053..28902b3dfe981 100644 --- a/library/stdarch/crates/intrinsic-test/src/common/gen_c.rs +++ b/library/stdarch/crates/intrinsic-test/src/common/gen_c.rs @@ -1,6 +1,7 @@ +use crate::common::intrinsic::Intrinsic; + use super::argument::Argument; use super::indentation::Indentation; -use super::intrinsic::IntrinsicDefinition; use super::intrinsic_helpers::IntrinsicTypeDefinition; // The number of times each intrinsic will be called. 
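Note: the `print_result_c` implementation moved onto `ArmIntrinsicType` above assembles the C++ expression printed for each lane of the return value. As a minimal sketch of the scalar (non-SIMD) branch only, assuming a hypothetical promotion string and element width rather than values taken from the real generator:

```rust
// Hedged sketch: how the scalar branch of `print_result_c` builds the printed
// expression for an unsigned result. `promotion` and `inner_size` stand in for
// the `c_promotion()` / `inner_size()` helpers used in the diff; the concrete
// values below are illustrative only.
fn scalar_print_expr(promotion: &str, inner_size: u32) -> String {
    format!("{promotion}cast<uint{inner_size}_t>(__return_value)")
}

fn main() {
    assert_eq!(
        scalar_print_expr("(unsigned int)", 16),
        "(unsigned int)cast<uint16_t>(__return_value)"
    );
}
```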
@@ -8,7 +9,7 @@ const PASSES: u32 = 20; pub fn generate_c_test_loop( w: &mut impl std::io::Write, - intrinsic: &dyn IntrinsicDefinition, + intrinsic: &Intrinsic, indentation: Indentation, additional: &str, passes: u32, @@ -21,16 +22,18 @@ pub fn generate_c_test_loop( {body_indentation}auto __return_value = {intrinsic_call}({args});\n\ {print_result}\n\ {indentation}}}", - loaded_args = intrinsic.arguments().load_values_c(body_indentation), - intrinsic_call = intrinsic.name(), - args = intrinsic.arguments().as_call_param_c(), - print_result = intrinsic.print_result_c(body_indentation, additional) + loaded_args = intrinsic.arguments.load_values_c(body_indentation), + intrinsic_call = intrinsic.name, + args = intrinsic.arguments.as_call_param_c(), + print_result = intrinsic + .results + .print_result_c(body_indentation, additional) ) } pub fn generate_c_constraint_blocks<'a, T: IntrinsicTypeDefinition + 'a>( w: &mut impl std::io::Write, - intrinsic: &dyn IntrinsicDefinition, + intrinsic: &Intrinsic, indentation: Indentation, constraints: &mut (impl Iterator> + Clone), name: String, @@ -63,14 +66,14 @@ pub fn generate_c_constraint_blocks<'a, T: IntrinsicTypeDefinition + 'a>( // Compiles C test programs using specified compiler pub fn create_c_test_function( w: &mut impl std::io::Write, - intrinsic: &dyn IntrinsicDefinition, + intrinsic: &Intrinsic, ) -> std::io::Result<()> { let indentation = Indentation::default(); - writeln!(w, "int run_{}() {{", intrinsic.name())?; + writeln!(w, "int run_{}() {{", intrinsic.name)?; // Define the arrays of arguments. - let arguments = intrinsic.arguments(); + let arguments = &intrinsic.arguments; arguments.gen_arglists_c(w, indentation.nested(), PASSES)?; generate_c_constraint_blocks( @@ -90,9 +93,9 @@ pub fn create_c_test_function( pub fn write_mod_cpp( w: &mut impl std::io::Write, notice: &str, - architecture: &str, platform_headers: &[&str], - intrinsics: &[impl IntrinsicDefinition], + forward_declarations: &str, + intrinsics: &[Intrinsic], ) -> std::io::Result<()> { write!(w, "{notice}")?; @@ -122,12 +125,7 @@ std::ostream& operator<<(std::ostream& os, float16_t value); "# )?; - writeln!(w, "#ifdef __{architecture}__")?; - writeln!( - w, - "std::ostream& operator<<(std::ostream& os, poly128_t value);" - )?; - writeln!(w, "#endif")?; + writeln!(w, "{}", forward_declarations)?; for intrinsic in intrinsics { create_c_test_function(w, intrinsic)?; @@ -138,7 +136,6 @@ std::ostream& operator<<(std::ostream& os, float16_t value); pub fn write_main_cpp<'a>( w: &mut impl std::io::Write, - architecture: &str, arch_specific_definitions: &str, intrinsics: impl Iterator + Clone, ) -> std::io::Result<()> { @@ -167,9 +164,8 @@ std::ostream& operator<<(std::ostream& os, float16_t value) {{ "# )?; - writeln!(w, "#ifdef __{architecture}__")?; + // NOTE: It's assumed that this value contains the required `ifdef`s. 
writeln!(w, "{arch_specific_definitions }")?; - writeln!(w, "#endif")?; for intrinsic in intrinsics.clone() { writeln!(w, "extern int run_{intrinsic}(void);")?; diff --git a/library/stdarch/crates/intrinsic-test/src/common/gen_rust.rs b/library/stdarch/crates/intrinsic-test/src/common/gen_rust.rs index 2a02b8fdff1df..c6b964a9ce4e4 100644 --- a/library/stdarch/crates/intrinsic-test/src/common/gen_rust.rs +++ b/library/stdarch/crates/intrinsic-test/src/common/gen_rust.rs @@ -1,8 +1,10 @@ use itertools::Itertools; use std::process::Command; +use crate::common::intrinsic::Intrinsic; + use super::indentation::Indentation; -use super::intrinsic::{IntrinsicDefinition, format_f16_return_value}; +use super::intrinsic::format_f16_return_value; use super::intrinsic_helpers::IntrinsicTypeDefinition; // The number of times each intrinsic will be called. @@ -96,11 +98,10 @@ pub fn write_main_rs<'a>( pub fn write_lib_rs( w: &mut impl std::io::Write, - architecture: &str, notice: &str, cfg: &str, definitions: &str, - intrinsics: &[impl IntrinsicDefinition], + intrinsics: &[Intrinsic], ) -> std::io::Result<()> { write!(w, "{notice}")?; @@ -115,8 +116,6 @@ pub fn write_lib_rs( writeln!(w, "{cfg}")?; - writeln!(w, "use core_arch::arch::{architecture}::*;")?; - writeln!(w, "{definitions}")?; for intrinsic in intrinsics { @@ -189,16 +188,16 @@ pub fn compile_rust_programs(toolchain: Option<&str>, target: &str, linker: Opti pub fn generate_rust_test_loop( w: &mut impl std::io::Write, - intrinsic: &dyn IntrinsicDefinition, + intrinsic: &Intrinsic, indentation: Indentation, specializations: &[Vec], passes: u32, ) -> std::io::Result<()> { - let intrinsic_name = intrinsic.name(); + let intrinsic_name = &intrinsic.name; // Each function (and each specialization) has its own type. Erase that type with a cast. let mut coerce = String::from("unsafe fn("); - for _ in intrinsic.arguments().iter().filter(|a| !a.has_constraint()) { + for _ in intrinsic.arguments.iter().filter(|a| !a.has_constraint()) { coerce += "_, "; } coerce += ") -> _"; @@ -248,13 +247,13 @@ pub fn generate_rust_test_loop( }}\n\ }}\n\ }}", - loaded_args = intrinsic.arguments().load_values_rust(indentation3), - args = intrinsic.arguments().as_call_param_rust(), + loaded_args = intrinsic.arguments.load_values_rust(indentation3), + args = intrinsic.arguments.as_call_param_rust(), ) } /// Generate the specializations (unique sequences of const-generic arguments) for this intrinsic. -fn generate_rust_specializations<'a>( +fn generate_rust_specializations( constraints: &mut impl Iterator>, ) -> Vec> { let mut specializations = vec![vec![]]; @@ -277,15 +276,15 @@ fn generate_rust_specializations<'a>( // Top-level function to create complete test program pub fn create_rust_test_module( w: &mut impl std::io::Write, - intrinsic: &dyn IntrinsicDefinition, + intrinsic: &Intrinsic, ) -> std::io::Result<()> { - trace!("generating `{}`", intrinsic.name()); + trace!("generating `{}`", intrinsic.name); let indentation = Indentation::default(); - writeln!(w, "pub fn run_{}() {{", intrinsic.name())?; + writeln!(w, "pub fn run_{}() {{", intrinsic.name)?; // Define the arrays of arguments. - let arguments = intrinsic.arguments(); + let arguments = &intrinsic.arguments; arguments.gen_arglists_rust(w, indentation.nested(), PASSES)?; // Define any const generics as `const` items, then generate the actual test loop. 
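Note: the reworked `generate_rust_test_loop` above erases each intrinsic's concrete function type with a cast string built from its non-constraint argument count. A minimal standalone sketch of that string construction (the argument count passed in `main` is illustrative, not taken from a real intrinsic):

```rust
// Hedged sketch of the type-erasing cast string: one `_` placeholder per
// non-constraint argument, mirroring the loop in `generate_rust_test_loop`.
fn coercion_type(non_constraint_args: usize) -> String {
    let mut coerce = String::from("unsafe fn(");
    for _ in 0..non_constraint_args {
        coerce += "_, ";
    }
    coerce += ") -> _";
    coerce
}

fn main() {
    // A two-argument intrinsic is erased to `unsafe fn(_, _, ) -> _`.
    assert_eq!(coercion_type(2), "unsafe fn(_, _, ) -> _");
}
```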
diff --git a/library/stdarch/crates/intrinsic-test/src/common/intrinsic.rs b/library/stdarch/crates/intrinsic-test/src/common/intrinsic.rs index bc46ccfbac40c..95276d19b72f9 100644 --- a/library/stdarch/crates/intrinsic-test/src/common/intrinsic.rs +++ b/library/stdarch/crates/intrinsic-test/src/common/intrinsic.rs @@ -1,5 +1,4 @@ use super::argument::ArgumentList; -use super::indentation::Indentation; use super::intrinsic_helpers::{IntrinsicTypeDefinition, TypeKind}; /// An intrinsic @@ -18,32 +17,14 @@ pub struct Intrinsic { pub arch_tags: Vec, } -pub trait IntrinsicDefinition -where - T: IntrinsicTypeDefinition, -{ - fn arguments(&self) -> ArgumentList; - - fn results(&self) -> T; - - fn name(&self) -> String; - - /// Generates a std::cout for the intrinsics results that will match the - /// rust debug output format for the return type. The generated line assumes - /// there is an int i in scope which is the current pass number. - fn print_result_c(&self, _indentation: Indentation, _additional: &str) -> String; -} - -pub fn format_f16_return_value( - intrinsic: &dyn IntrinsicDefinition, -) -> String { +pub fn format_f16_return_value(intrinsic: &Intrinsic) -> String { // the `intrinsic-test` crate compares the output of C and Rust intrinsics. Currently, It uses // a string representation of the output value to compare. In C, f16 values are currently printed // as hexadecimal integers. Since https://github.com/rust-lang/rust/pull/127013, rust does print // them as decimal floating point values. To keep the intrinsics tests working, for now, format // vectors containing f16 values like C prints them. - let return_value = match intrinsic.results().kind() { - TypeKind::Float if intrinsic.results().inner_size() == 16 => "debug_f16(__return_value)", + let return_value = match intrinsic.results.kind() { + TypeKind::Float if intrinsic.results.inner_size() == 16 => "debug_f16(__return_value)", _ => "format_args!(\"{__return_value:.150?}\")", }; diff --git a/library/stdarch/crates/intrinsic-test/src/common/intrinsic_helpers.rs b/library/stdarch/crates/intrinsic-test/src/common/intrinsic_helpers.rs index f5e84ca97af21..7bc1015a387c1 100644 --- a/library/stdarch/crates/intrinsic-test/src/common/intrinsic_helpers.rs +++ b/library/stdarch/crates/intrinsic-test/src/common/intrinsic_helpers.rs @@ -325,4 +325,9 @@ pub trait IntrinsicTypeDefinition: Deref { /// can be directly defined in `impl` blocks fn c_single_vector_type(&self) -> String; + + /// Generates a std::cout for the intrinsics results that will match the + /// rust debug output format for the return type. The generated line assumes + /// there is an int i in scope which is the current pass number. 
+ fn print_result_c(&self, indentation: Indentation, additional: &str) -> String; } diff --git a/library/stdarch/crates/intrinsic-test/src/common/mod.rs b/library/stdarch/crates/intrinsic-test/src/common/mod.rs index 5a57c8027db9b..666b3885c147b 100644 --- a/library/stdarch/crates/intrinsic-test/src/common/mod.rs +++ b/library/stdarch/crates/intrinsic-test/src/common/mod.rs @@ -1,5 +1,20 @@ +use std::fs::File; + +use rayon::prelude::*; + use cli::ProcessedCli; +use crate::common::{ + compile_c::CppCompilation, + gen_c::{write_main_cpp, write_mod_cpp}, + gen_rust::{ + compile_rust_programs, write_bin_cargo_toml, write_lib_cargo_toml, write_lib_rs, + write_main_rs, + }, + intrinsic::Intrinsic, + intrinsic_helpers::IntrinsicTypeDefinition, +}; + pub mod argument; pub mod cli; pub mod compare; @@ -15,12 +30,162 @@ pub mod values; /// Architectures must support this trait /// to be successfully tested. pub trait SupportedArchitectureTest { - fn create(cli_options: ProcessedCli) -> Box - where - Self: Sized; - fn build_c_file(&self) -> bool; - fn build_rust_file(&self) -> bool; - fn compare_outputs(&self) -> bool; + type IntrinsicImpl: IntrinsicTypeDefinition + Sync; + + fn cli_options(&self) -> &ProcessedCli; + fn intrinsics(&self) -> &[Intrinsic]; + + fn create(cli_options: ProcessedCli) -> Self; + + const NOTICE: &str; + + const PLATFORM_C_HEADERS: &[&str]; + const PLATFORM_C_DEFINITIONS: &str; + const PLATFORM_C_FORWARD_DECLARATIONS: &str; + + const PLATFORM_RUST_CFGS: &str; + const PLATFORM_RUST_DEFINITIONS: &str; + + fn cpp_compilation(&self) -> Option; + + fn build_c_file(&self) -> bool { + let (chunk_size, chunk_count) = chunk_info(self.intrinsics().len()); + + let cpp_compiler_wrapped = self.cpp_compilation(); + + std::fs::create_dir_all("c_programs").unwrap(); + self.intrinsics() + .par_chunks(chunk_size) + .enumerate() + .map(|(i, chunk)| { + let c_filename = format!("c_programs/mod_{i}.cpp"); + let mut file = File::create(&c_filename).unwrap(); + write_mod_cpp( + &mut file, + Self::NOTICE, + Self::PLATFORM_C_HEADERS, + Self::PLATFORM_C_FORWARD_DECLARATIONS, + chunk, + ) + .unwrap(); + + // compile this cpp file into a .o file. 
+ // + // This is done because `cpp_compiler_wrapped` is None when + // the --generate-only flag is passed + if let Some(cpp_compiler) = cpp_compiler_wrapped.as_ref() { + let output = cpp_compiler + .compile_object_file(&format!("mod_{i}.cpp"), &format!("mod_{i}.o"))?; + assert!(output.status.success(), "{output:?}"); + } + + Ok(()) + }) + .collect::>() + .unwrap(); + + let mut file = File::create("c_programs/main.cpp").unwrap(); + write_main_cpp( + &mut file, + Self::PLATFORM_C_DEFINITIONS, + self.intrinsics().iter().map(|i| i.name.as_str()), + ) + .unwrap(); + + // This is done because `cpp_compiler_wrapped` is None when + // the --generate-only flag is passed + if let Some(cpp_compiler) = cpp_compiler_wrapped.as_ref() { + // compile this cpp file into a .o file + info!("compiling main.cpp"); + let output = cpp_compiler + .compile_object_file("main.cpp", "intrinsic-test-programs.o") + .unwrap(); + assert!(output.status.success(), "{output:?}"); + + let object_files = (0..chunk_count) + .map(|i| format!("mod_{i}.o")) + .chain(["intrinsic-test-programs.o".to_owned()]); + + let output = cpp_compiler + .link_executable(object_files, "intrinsic-test-programs") + .unwrap(); + assert!(output.status.success(), "{output:?}"); + } + + true + } + + fn build_rust_file(&self) -> bool { + std::fs::create_dir_all("rust_programs/src").unwrap(); + + let (chunk_size, chunk_count) = chunk_info(self.intrinsics().len()); + + let mut cargo = File::create("rust_programs/Cargo.toml").unwrap(); + write_bin_cargo_toml(&mut cargo, chunk_count).unwrap(); + + let mut main_rs = File::create("rust_programs/src/main.rs").unwrap(); + write_main_rs( + &mut main_rs, + chunk_count, + Self::PLATFORM_RUST_CFGS, + "", + self.intrinsics().iter().map(|i| i.name.as_str()), + ) + .unwrap(); + + let target = &self.cli_options().target; + let toolchain = self.cli_options().toolchain.as_deref(); + let linker = self.cli_options().linker.as_deref(); + + self.intrinsics() + .par_chunks(chunk_size) + .enumerate() + .map(|(i, chunk)| { + std::fs::create_dir_all(format!("rust_programs/mod_{i}/src"))?; + + let rust_filename = format!("rust_programs/mod_{i}/src/lib.rs"); + trace!("generating `{rust_filename}`"); + let mut file = File::create(rust_filename)?; + + write_lib_rs( + &mut file, + Self::NOTICE, + Self::PLATFORM_RUST_CFGS, + Self::PLATFORM_RUST_DEFINITIONS, + chunk, + )?; + + let toml_filename = format!("rust_programs/mod_{i}/Cargo.toml"); + trace!("generating `{toml_filename}`"); + let mut file = File::create(toml_filename).unwrap(); + + write_lib_cargo_toml(&mut file, &format!("mod_{i}"))?; + + Ok(()) + }) + .collect::>() + .unwrap(); + + compile_rust_programs(toolchain, target, linker) + } + + fn compare_outputs(&self) -> bool { + if self.cli_options().toolchain.is_some() { + let intrinsics_name_list = self + .intrinsics() + .iter() + .map(|i| i.name.clone()) + .collect::>(); + + compare::compare_outputs( + &intrinsics_name_list, + &self.cli_options().runner, + &self.cli_options().target, + ) + } else { + true + } + } } pub fn chunk_info(intrinsic_count: usize) -> (usize, usize) { diff --git a/library/stdarch/crates/intrinsic-test/src/main.rs b/library/stdarch/crates/intrinsic-test/src/main.rs index 538f317a2978b..44d7aafd827fe 100644 --- a/library/stdarch/crates/intrinsic-test/src/main.rs +++ b/library/stdarch/crates/intrinsic-test/src/main.rs @@ -13,23 +13,16 @@ fn main() { let args: Cli = clap::Parser::parse(); let processed_cli_options = ProcessedCli::new(args); - let test_environment_result: Option> = - match 
processed_cli_options.target.as_str() { - "aarch64-unknown-linux-gnu" - | "armv7-unknown-linux-gnueabihf" - | "aarch64_be-unknown-linux-gnu" => { - Some(ArmArchitectureTest::create(processed_cli_options)) - } + match processed_cli_options.target.as_str() { + "aarch64-unknown-linux-gnu" + | "armv7-unknown-linux-gnueabihf" + | "aarch64_be-unknown-linux-gnu" => run(ArmArchitectureTest::create(processed_cli_options)), - _ => None, - }; - - if test_environment_result.is_none() { - std::process::exit(0); + _ => std::process::exit(0), } +} - let test_environment = test_environment_result.unwrap(); - +fn run(test_environment: impl SupportedArchitectureTest) { info!("building C binaries"); if !test_environment.build_c_file() { std::process::exit(2); diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml b/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml index a31613e6b1aef..ccdcea980e1b2 100644 --- a/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml +++ b/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml @@ -17,6 +17,10 @@ neon-stable: &neon-stable target-not-arm: &target-not-arm FnCall: [cfg, [{ FnCall: [not, ['target_arch = "arm"']]}]] +# #[cfg(not(target_arch = "arm64ec"))] +target-not-arm64ec: &target-not-arm64ec + FnCall: [cfg, [{ FnCall: [not, ['target_arch = "arm64ec"']]}]] + # #[cfg_attr(all(test, not(target_env = "msvc"))] msvc-disabled: &msvc-disabled FnCall: [all, [test, {FnCall: [not, ['target_env = "msvc"']]}]] @@ -169,6 +173,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [fabd] safety: safe types: @@ -368,6 +373,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcmp]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["h_f16", "f16", "u16"] @@ -559,6 +565,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcmp]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["h_f16", "f16", "u16"] @@ -642,6 +649,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcmp]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["h_f16", "f16", "u16"] @@ -777,6 +785,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcmp]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["h_f16", "f16", "u16"] @@ -859,6 +868,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcmp]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["h_f16", "f16", "u16"] @@ -931,6 +941,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [facgt]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["h_f16", "f16", "u16", i32] @@ -988,6 +999,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [facge]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["h_f16", "f16", "u16", i32] @@ -1036,6 +1048,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [facgt]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["h_f16", "f16", "u16"] @@ -1078,6 +1091,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [facge]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["h_f16", "f16", "u16"] @@ -1175,6 +1189,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['1']] - *neon-fp16 - 
*neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const N: i32'] safety: safe types: @@ -1202,6 +1217,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['1']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const N: i32'] safety: safe types: @@ -1219,6 +1235,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['1']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const N: i32'] safety: safe types: @@ -1246,6 +1263,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['1']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const N: i32'] safety: safe types: @@ -1264,6 +1282,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['1']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const N: i32'] safety: safe types: @@ -1386,6 +1405,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [scvtf]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["s16", "f16", "h_f16_s16", i16] @@ -1402,6 +1422,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtzs]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["f16", "s16", "h", i16, 'a as i16'] @@ -1418,6 +1439,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtzu]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["f16", "u16", "h", 'a as u16'] @@ -1435,6 +1457,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [ucvtf]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["u16", "f16", "h_f16_u16"] @@ -1486,6 +1509,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtn2]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x8_t, float16x4_t, float32x4_t] @@ -1503,6 +1527,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtl2]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float32x4_t, float16x8_t] @@ -1650,6 +1675,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['1']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const N: i32'] safety: safe types: @@ -1675,6 +1701,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['1']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const N: i32'] safety: safe types: @@ -1693,6 +1720,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['1']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const N: i32'] safety: safe types: @@ -1783,6 +1811,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtas]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, int16x4_t] @@ -1821,6 +1850,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtau]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["f16", "u32", 'h_u32_f16'] @@ -1842,6 +1872,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtas]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["f16", "i32", 'h_s32_f16'] @@ -1863,6 +1894,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtas]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["f16", "i16", 'h_s16_f16', 's32'] @@ -1877,6 +1909,7 @@ intrinsics: - 
FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtau]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["f16", "u16", 'h_u16_f16', 'u32'] @@ -1948,6 +1981,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtns]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, int16x4_t] @@ -1968,6 +2002,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtnu]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, uint16x4_t] @@ -1987,6 +2022,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtns]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["f16", "i32", 'h'] @@ -2007,6 +2043,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtns]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["f16", "i16", 'h', 'i32'] @@ -2022,6 +2059,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtnu]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["f16", "u32", 'h'] @@ -2042,6 +2080,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtnu]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["f16", "u16", 'h', 'u32'] @@ -2077,6 +2116,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtms]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, int16x4_t] @@ -2097,6 +2137,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtmu]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, uint16x4_t] @@ -2291,6 +2332,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtps]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, int16x4_t] @@ -2311,6 +2353,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtpu]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, uint16x4_t] @@ -2331,6 +2374,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtps]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["f16", "i32", 'h'] @@ -2351,6 +2395,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtps]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["f16", "i16", 'h', 'i32'] @@ -2365,6 +2410,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtpu]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["f16", "u32", 'h'] @@ -2385,6 +2431,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtpu]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["f16", "u16", 'h', 'u32'] @@ -2531,6 +2578,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['1']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const N: i32'] safety: safe types: @@ -2549,6 +2597,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['1']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const N: i32'] safety: safe types: @@ -2770,6 +2819,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [fneg] safety: safe types: @@ -2986,6 
+3036,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [frintx] safety: safe types: @@ -3002,6 +3053,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [frintx] safety: safe types: @@ -3033,6 +3085,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [frinta] safety: safe types: @@ -3049,6 +3102,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [frinta] safety: safe types: @@ -3096,6 +3150,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [frintn] safety: safe types: @@ -3130,6 +3185,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [frintm] safety: safe types: @@ -3146,6 +3202,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [frintm] safety: safe types: @@ -3178,6 +3235,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [frintp] safety: safe types: @@ -3193,6 +3251,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [frintp] safety: safe types: @@ -3222,6 +3281,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [frintz] safety: safe types: @@ -3238,6 +3298,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [frintz] safety: safe types: @@ -3273,6 +3334,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [frinti] safety: safe types: @@ -3293,6 +3355,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec # TODO: double check me assert_instr: [frinti] safety: safe @@ -5205,6 +5268,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [fmulx] safety: safe types: @@ -5243,6 +5307,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [fmulx] safety: safe types: @@ -5385,6 +5450,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['2']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ["const LANE: i32"] safety: safe types: @@ -5438,6 +5504,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['2']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ["const LANE: i32"] safety: safe types: @@ -5462,6 +5529,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fmulx]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, "f16"] @@ -5546,6 +5614,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [fmla] safety: safe types: @@ -5582,6 +5651,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [fdiv] safety: safe types: @@ -5597,6 +5667,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [nop] safety: safe types: @@ -5637,6 +5708,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [nop] safety: safe types: @@ -5928,6 +6000,7 @@ intrinsics: - *neon-fp16 - *enable-fcma - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [fcadd] safety: safe types: @@ -5948,6 +6021,7 @@ intrinsics: - *neon-fp16 - *enable-fcma - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [fcadd] safety: safe types: 
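Note: the `*target-not-arm64ec` anchor threaded through these hunks is defined earlier in the spec as `#[cfg(not(target_arch = "arm64ec"))]`, so the gated f16 intrinsics are simply compiled out on arm64ec. A minimal sketch of the effect of such a gate (the functions here are stand-ins, not generated intrinsics):

```rust
// Hedged sketch: a cfg gate like the one the new anchor emits removes the item
// entirely on arm64ec; the alternate definition below exists only so this
// example builds and runs on every target.
#[cfg(not(target_arch = "arm64ec"))]
fn fp16_path() -> &'static str {
    "f16 intrinsics compiled in"
}

#[cfg(target_arch = "arm64ec")]
fn fp16_path() -> &'static str {
    "f16 intrinsics gated out on arm64ec"
}

fn main() {
    println!("{}", fp16_path());
}
```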
@@ -5988,6 +6062,7 @@ intrinsics: - FnCall: [target_feature, ['enable = "neon,fcma"']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [fcmla] safety: safe types: @@ -6028,6 +6103,7 @@ intrinsics: - FnCall: [target_feature, ['enable = "neon,fcma"']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [fcmla] safety: safe types: @@ -6069,6 +6145,7 @@ intrinsics: - FnCall: [target_feature, ['enable = "neon,fcma"']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [fcmla] safety: safe types: @@ -6113,6 +6190,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['3']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ["const LANE: i32"] safety: safe types: @@ -6158,6 +6236,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['3']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ["const LANE: i32"] safety: safe types: @@ -6203,6 +6282,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['3']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ["const LANE: i32"] safety: safe types: @@ -6245,6 +6325,7 @@ intrinsics: - FnCall: [target_feature, ['enable = "neon,fcma"']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [fcmla] safety: safe types: @@ -6290,6 +6371,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['3']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ["const LANE: i32"] safety: safe types: @@ -6337,6 +6419,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['3']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ["const LANE: i32"] safety: safe types: @@ -6384,6 +6467,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['3']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ["const LANE: i32"] safety: safe types: @@ -6430,6 +6514,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['3']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ["const LANE: i32"] safety: safe types: @@ -6473,6 +6558,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['3']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ["const LANE: i32"] safety: safe types: @@ -6568,6 +6654,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [fmax] safety: safe types: @@ -6601,6 +6688,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [fmaxnm] safety: safe types: @@ -6616,6 +6704,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [fminnm] safety: safe types: @@ -6657,6 +6746,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [fmaxnmv] safety: safe types: @@ -6673,6 +6763,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [fminnmv] safety: safe types: @@ -6689,6 +6780,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [fmaxv] safety: safe types: @@ -6708,6 +6800,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [fminv] safety: safe types: @@ -6762,6 +6855,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [fmin] safety: safe types: @@ -6875,6 +6969,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [faddp] safety: safe types: @@ 
-6894,6 +6989,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [fmaxp] safety: safe types: @@ -6914,6 +7010,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [fmaxnmp] safety: safe types: @@ -6934,6 +7031,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [fminp] safety: safe types: @@ -6954,6 +7052,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [fminnmp] safety: safe types: @@ -8379,6 +8478,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fsqrt]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - float16x4_t @@ -8393,6 +8493,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [fsqrt] safety: safe types: @@ -8445,6 +8546,7 @@ intrinsics: - *neon-fp16 - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [frsqrts]]}]] - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [h_f16, "f16"] @@ -8501,6 +8603,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [frecpe]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [h_f16, "f16"] @@ -8557,6 +8660,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [frecps]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [h_f16, "f16"] @@ -8595,6 +8699,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [frecpx]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [h_f16, "f16"] @@ -8676,6 +8781,7 @@ intrinsics: - [float64x1_t, float32x2_t] - [float32x4_t, float64x2_t] - [float64x2_t, float32x4_t] + big_endian_inverse: false compose: - FnCall: [transmute, [a]] @@ -8687,6 +8793,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [nop] safety: safe types: @@ -8695,6 +8802,7 @@ intrinsics: # q - [float64x2_t, float16x8_t] - [float16x8_t, float64x2_t] + big_endian_inverse: false compose: - FnCall: [transmute, [a]] @@ -9586,6 +9694,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [not, ['target_env = "msvc"']]}]]}, {FnCall: [assert_instr, [trn1]]}]] safety: safe types: @@ -9647,6 +9756,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [not, ['target_env = "msvc"']]}]]}, {FnCall: [assert_instr, [trn2]]}]] safety: safe types: @@ -9715,6 +9825,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [not, ['target_env = "msvc"']]}]]}, {FnCall: [assert_instr, [zip2]]}]] safety: safe types: @@ -9765,6 +9876,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [not, ['target_env = "msvc"']]}]]}, {FnCall: [assert_instr, [zip1]]}]] safety: safe types: @@ -9826,6 +9938,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [not, ['target_env = "msvc"']]}]]}, {FnCall: [assert_instr, [uzp1]]}]] safety: safe types: @@ -9891,6 +10004,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [not, ['target_env = "msvc"']]}]]}, 
{FnCall: [assert_instr, [uzp2]]}]] safety: safe types: @@ -10180,6 +10294,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['3']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const LANE: i32'] safety: safe types: @@ -10206,6 +10321,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['3']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const LANE: i32'] safety: safe types: @@ -10230,6 +10346,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fmsub]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["f16", "h_f16"] @@ -10342,6 +10459,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fmadd]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["f16", "h_f16"] @@ -10358,6 +10476,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['3']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const LANE: i32'] safety: safe types: @@ -10377,6 +10496,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['3']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const LANE: i32'] safety: safe types: @@ -10541,6 +10661,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcmeq]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, uint16x4_t, 'f16x4', 'f16x4::new(0.0, 0.0, 0.0, 0.0)'] @@ -10576,6 +10697,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcmp]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["f16", "u16", "h_f16"] @@ -10700,6 +10822,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcmp]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["h_f16", "f16", "u16"] @@ -10842,6 +10965,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcmp]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["h_f16", "f16", "u16"] @@ -10972,6 +11096,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcmp]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["h_f16", "f16", "u16"] @@ -11126,6 +11251,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['2']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const LANE: i32'] safety: safe types: @@ -11146,6 +11272,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [nop] safety: safe types: @@ -11183,6 +11310,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['2']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const LANE: i32'] safety: safe types: @@ -11328,6 +11456,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcmp]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["f16", "u16", 'h_f16'] @@ -11399,6 +11528,7 @@ intrinsics: attr: - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [fmls] safety: safe types: @@ -11651,6 +11781,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [frsqrte]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["h_f16", "f16"] @@ -11734,6 +11865,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtau]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, 
uint16x4_t] @@ -11772,6 +11904,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtms]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["f16", "i32", 'h'] @@ -11792,6 +11925,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtms]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["f16", "i16", 'h', 'i32'] @@ -11807,6 +11941,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtmu]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["f16", "u32", 'h'] @@ -11827,6 +11962,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fcvtmu]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["f16", "u16", 'h', 'u32'] @@ -12857,6 +12993,7 @@ intrinsics: - FnCall: [target_feature, ['enable = "{type[2]}"']] - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [ldr]]}]] - *neon-unstable-f16 + - *target-not-arm64ec safety: unsafe: [neon] types: @@ -12924,6 +13061,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [str]]}]] - FnCall: [allow, ['clippy::cast_ptr_alignment']] - *neon-unstable-f16 + - *target-not-arm64ec safety: unsafe: [neon] types: @@ -13637,6 +13775,7 @@ intrinsics: - *neon-fp16 - *enable-fhm - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [fmlal2] safety: safe types: @@ -13660,6 +13799,7 @@ intrinsics: - *enable-fhm - FnCall: [rustc_legacy_const_generics, ['3']] - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const LANE: i32'] safety: safe types: @@ -13684,6 +13824,7 @@ intrinsics: - *neon-fp16 - *enable-fhm - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [fmlal] safety: safe types: @@ -13707,6 +13848,7 @@ intrinsics: - *enable-fhm - FnCall: [rustc_legacy_const_generics, ['3']] - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const LANE: i32'] safety: safe types: @@ -13731,6 +13873,7 @@ intrinsics: - *neon-fp16 - *enable-fhm - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [fmlsl2] safety: safe types: @@ -13753,6 +13896,7 @@ intrinsics: - *enable-fhm - FnCall: [rustc_legacy_const_generics, ['3']] - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const LANE: i32'] safety: safe types: @@ -13777,6 +13921,7 @@ intrinsics: - *neon-fp16 - *enable-fhm - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [fmlsl] safety: safe types: @@ -13799,6 +13944,7 @@ intrinsics: - *enable-fhm - FnCall: [rustc_legacy_const_generics, ['3']] - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const LANE: i32'] safety: safe types: diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml b/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml index c96c6e2a0c0b5..61a3a5853632c 100644 --- a/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml +++ b/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml @@ -37,6 +37,10 @@ target-is-arm: &target-is-arm target-not-arm: &target-not-arm FnCall: [cfg, [{ FnCall: [not, ['target_arch = "arm"']]}]] +# #[cfg(not(target_arch = "arm64ec"))] +target-not-arm64ec: &target-not-arm64ec + FnCall: [cfg, [{ FnCall: [not, ['target_arch = "arm64ec"']]}]] + not-arm: ¬-arm FnCall: [not, ['target_arch = "arm"']] @@ -278,6 +282,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fabd]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - 
float16x4_t @@ -396,6 +401,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcmeq]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, uint16x4_t] @@ -457,6 +463,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fabs]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - float16x4_t @@ -474,6 +481,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fabs]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ['h_f16', 'f16'] @@ -555,6 +563,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcmgt]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, uint16x4_t] @@ -573,6 +582,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcmgt]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, uint16x4_t, f16x4, 'f16x4::new(0.0, 0.0, 0.0, 0.0)'] @@ -651,6 +661,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcmge]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, uint16x4_t] @@ -668,6 +679,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcmle]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, uint16x4_t, f16x4, 'f16x4::new(0.0, 0.0, 0.0, 0.0)'] @@ -849,6 +861,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [facgt]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, uint16x4_t] @@ -895,6 +908,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [facge]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, uint16x4_t] @@ -935,6 +949,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [facgt]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, uint16x4_t] @@ -970,6 +985,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [facge]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, uint16x4_t] @@ -1004,6 +1020,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [scvtf]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [int16x4_t, float16x4_t] @@ -1038,6 +1055,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ucvtf]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [uint16x4_t, float16x4_t] @@ -1109,6 +1127,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['1']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const N: i32'] safety: safe types: @@ -1140,6 +1159,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['1']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const N: i32'] safety: safe types: @@ -1171,6 +1191,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['1']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const N: 
i32'] safety: safe types: @@ -1229,6 +1250,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['1']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const N: i32'] safety: safe types: @@ -1482,6 +1504,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['1']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const N: i32'] safety: safe types: @@ -1501,6 +1524,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [dup]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, f16, 'float16x4', '_n_'] @@ -1519,6 +1543,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['1']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const N: i32'] safety: safe types: @@ -1740,6 +1765,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['2']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const N: i32'] safety: safe types: @@ -1758,6 +1784,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['2']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const N: i32'] safety: safe types: @@ -2207,6 +2234,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fneg]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, 'f16'] @@ -2262,13 +2290,10 @@ intrinsics: - [uint64x1_t, u64, i64] - [uint64x2_t, u64, i64] compose: - - LLVMLink: - name: "uqsub.{neon_type[0]}" - links: - - link: "llvm.aarch64.neon.uqsub.v{neon_type[0].lane}{type[2]}" - arch: aarch64,arm64ec - - link: "llvm.usub.sat.v{neon_type[0].lane}{type[2]}" - arch: arm + - FnCall: + - simd_saturating_sub + - - a + - b - name: "vqsub{neon_type[0].no}" doc: Saturating subtract @@ -2291,13 +2316,10 @@ intrinsics: - [int64x1_t, s64, i64] - [int64x2_t, s64, i64] compose: - - LLVMLink: - name: "sqsub.{neon_type[0]}" - links: - - link: "llvm.aarch64.neon.sqsub.v{neon_type[0].lane}{type[2]}" - arch: aarch64,arm64ec - - link: "llvm.ssub.sat.v{neon_type[0].lane}{type[2]}" - arch: arm + - FnCall: + - simd_saturating_sub + - - a + - b - name: "vhadd{neon_type.no}" doc: Halving add @@ -2464,9 +2486,7 @@ intrinsics: name: "llvm.frinn.{neon_type}" links: - link: "llvm.roundeven.{neon_type}" - arch: aarch64,arm64ec - - link: "llvm.arm.neon.vrintn.{neon_type}" - arch: arm + arch: aarch64,arm64ec,arm - name: "vrndn{neon_type.no}" doc: "Floating-point round to integral, to nearest with ties to even" @@ -2478,6 +2498,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [frintn]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - float16x4_t @@ -2487,9 +2508,7 @@ intrinsics: name: "llvm.frinn.{neon_type}" links: - link: "llvm.roundeven.{neon_type}" - arch: aarch64,arm64ec - - link: "llvm.arm.neon.vrintn.{neon_type}" - arch: arm + arch: aarch64,arm64ec,arm - name: "vqadd{neon_type.no}" doc: Saturating add @@ -2512,13 +2531,10 @@ intrinsics: - uint64x1_t - uint64x2_t compose: - - LLVMLink: - name: "uqadd.{neon_type}" - links: - - link: "llvm.aarch64.neon.uqadd.{neon_type}" - arch: aarch64,arm64ec - - link: "llvm.uadd.sat.{neon_type}" - arch: arm + - FnCall: + - simd_saturating_add + - - a + - b - name: "vqadd{neon_type.no}" doc: Saturating add @@ -2541,13 +2557,10 @@ intrinsics: - int64x1_t - int64x2_t compose: - - LLVMLink: - name: "sqadd.{neon_type}" - links: - - link: 
"llvm.aarch64.neon.sqadd.{neon_type}" - arch: aarch64,arm64ec - - link: "llvm.sadd.sat.{neon_type}" - arch: arm + - FnCall: + - simd_saturating_add + - - a + - b - name: "vld1{neon_type[1].no}" doc: "Load multiple single-element structures to one, two, three, or four registers" @@ -2743,6 +2756,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ld1]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: unsafe: [neon] types: @@ -2773,6 +2787,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ["2"]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const LANE: i32'] safety: unsafe: [neon] @@ -2793,6 +2808,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ld1r]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: unsafe: [neon] types: @@ -3385,6 +3401,7 @@ intrinsics: - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vld2]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: unsafe: [neon] types: @@ -3413,6 +3430,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ld2]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: unsafe: [neon] types: @@ -3440,6 +3458,7 @@ intrinsics: - *neon-fp16 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vld2]]}]] - *neon-unstable-f16 + - *target-not-arm64ec safety: unsafe: [neon] types: @@ -3469,6 +3488,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ld2r]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: unsafe: [neon] types: @@ -3498,6 +3518,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ["2"]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: - "const LANE: i32" safety: @@ -3540,6 +3561,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ["2"]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: - "const LANE: i32" safety: @@ -3580,6 +3602,7 @@ intrinsics: - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vld3]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: unsafe: [neon] types: @@ -3608,6 +3631,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ld3]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: unsafe: [neon] types: @@ -3635,6 +3659,7 @@ intrinsics: - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vld3]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: unsafe: [neon] types: @@ -3664,6 +3689,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ld3r]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: unsafe: [neon] types: @@ -3693,6 +3719,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ["2"]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: - "const LANE: i32" safety: @@ -3737,6 +3764,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ["2"]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: - "const LANE: i32" safety: @@ -4718,6 +4746,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ["2"]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec types: - ['*mut f16', float16x4_t, '2'] - ['*mut f16', float16x8_t, '3'] @@ -4955,6 +4984,7 @@ intrinsics: - *neon-v7 - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [vst1] 
types: - [f16, float16x4x4_t, float16x4_t] @@ -5098,6 +5128,7 @@ intrinsics: - *target-not-arm - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [st2] safety: unsafe: [neon] @@ -5188,6 +5219,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [st2, 'LANE = 0']]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const LANE: i32'] safety: unsafe: [neon] @@ -5279,6 +5311,7 @@ intrinsics: - *neon-v7 - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [vst2] safety: unsafe: [neon] @@ -5345,6 +5378,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['2']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const LANE: i32'] safety: unsafe: [neon] @@ -5555,6 +5589,7 @@ intrinsics: - *neon-v7 - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [vst3] safety: unsafe: [neon] @@ -5623,6 +5658,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['2']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const LANE: i32'] safety: unsafe: [neon] @@ -5683,6 +5719,7 @@ intrinsics: - *target-not-arm - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [st3] safety: unsafe: [neon] @@ -5747,6 +5784,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [st3, 'LANE = 0']]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const LANE: i32'] safety: unsafe: [neon] @@ -5960,6 +5998,7 @@ intrinsics: - *neon-v7 - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [vst4] safety: unsafe: [neon] @@ -6029,6 +6068,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['2']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const LANE: i32'] safety: unsafe: [neon] @@ -6091,6 +6131,7 @@ intrinsics: - *target-not-arm - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [st4] safety: unsafe: [neon] @@ -6157,6 +6198,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [st4, 'LANE = 0']]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const LANE: i32'] safety: unsafe: [neon] @@ -6317,6 +6359,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fmul]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [f16, float16x4_t] @@ -6366,6 +6409,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['2']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ["const LANE: i32"] safety: safe types: @@ -6569,6 +6613,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fmla]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - float16x4_t @@ -6678,6 +6723,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fsub]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ['f16', float16x4_t] @@ -6696,6 +6742,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fadd]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - float16x4_t @@ -6716,6 +6763,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fadd]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ['h_f16', 'f16'] @@ -7194,6 +7242,7 @@ intrinsics: - FnCall: [cfg_attr, 
[*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fmax]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - float16x4_t @@ -7236,6 +7285,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fmaxnm]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - float16x4_t @@ -7254,6 +7304,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fminnm]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - float16x4_t @@ -7340,6 +7391,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fmin]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - float16x4_t @@ -7404,6 +7456,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [faddp]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - float16x4_t @@ -8247,6 +8300,7 @@ intrinsics: - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vrsqrts]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [frsqrts]]}]] - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - float16x4_t @@ -8295,6 +8349,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [frecpe]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - float16x4_t @@ -8343,6 +8398,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [frecps]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - float16x4_t @@ -8424,6 +8480,7 @@ intrinsics: - [poly16x8_t, p128] - [int8x16_t, p128] - [uint8x16_t, p128] + big_endian_inverse: false compose: - FnCall: [transmute, [a]] @@ -8661,6 +8718,7 @@ intrinsics: - [poly8x16_t, float32x4_t] - [poly16x8_t, float32x4_t] - [p128, float32x4_t] + big_endian_inverse: false compose: - FnCall: [transmute, [a]] @@ -8675,6 +8733,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: # non-q @@ -8723,6 +8782,7 @@ intrinsics: - [float16x8_t, uint16x8_t] - [float16x8_t, uint32x4_t] - [float16x8_t, uint64x2_t] + big_endian_inverse: false compose: - FnCall: [transmute, [a]] @@ -8737,6 +8797,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [poly64x1_t, float16x4_t] @@ -8746,6 +8807,7 @@ intrinsics: - [poly128_t, float16x8_t] - [float16x8_t, poly128_t] - [float16x8_t, poly64x2_t] + big_endian_inverse: false compose: - FnCall: [transmute, [a]] @@ -8759,6 +8821,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [rev64]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, "[3, 2, 1, 0]"] @@ -9074,6 +9137,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ["u64", float16x4_t] @@ -9147,6 +9211,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['2']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const LANE: i32'] safety: safe types: @@ -9578,6 +9643,7 @@ intrinsics: - FnCall: [cfg_attr, 
[*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [trn2]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, float16x4x2_t, '[0, 4, 2, 6]', '[1, 5, 3, 7]'] @@ -9733,6 +9799,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [zip2]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, float16x4x2_t, '[0, 4, 1, 5]', '[2, 6, 3, 7]'] @@ -9803,6 +9870,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [uzp2]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, float16x4x2_t, '[0, 2, 4, 6]', '[1, 3, 5, 7]'] @@ -10050,6 +10118,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [vst1]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: unsafe: [neon] types: @@ -10077,6 +10146,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [st1]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: unsafe: [neon] types: @@ -10103,6 +10173,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [vst1]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: unsafe: [neon] types: @@ -10179,6 +10250,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [st1]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: unsafe: [neon] types: @@ -10233,6 +10305,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [st1]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: unsafe: [neon] types: @@ -10376,6 +10449,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcmge]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, uint16x4_t] @@ -10394,6 +10468,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcmge]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, uint16x4_t, f16x4, 'f16x4::new(0.0, 0.0, 0.0, 0.0)'] @@ -10633,6 +10708,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcvtzu]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, uint16x4_t] @@ -10652,6 +10728,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcvtn]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float32x4_t, float16x4_t] @@ -10668,6 +10745,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcvtl]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, float32x4_t] @@ -11029,6 +11107,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fmul]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, "f16"] @@ -11141,6 +11220,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcmgt]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, uint16x4_t] @@ -11159,6 +11239,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcmlt]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, uint16x4_t, f16x4, 
'f16x4::new(0.0, 0.0, 0.0, 0.0)'] @@ -11271,6 +11352,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fmls]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - float16x4_t @@ -11386,6 +11468,7 @@ intrinsics: - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vrsqrte]]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [frsqrte]]}]] - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - float16x4_t @@ -11499,6 +11582,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [fcvtzs]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, int16x4_t] @@ -11748,6 +11832,7 @@ intrinsics: - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [nop]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: unsafe: [neon] types: @@ -11834,6 +11919,7 @@ intrinsics: - FnCall: [target_feature, ['enable = "{type[3]}"']] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['{type[2]}']]}]] types: - ['*const f16', float16x4_t, '"vld1.16"', 'neon,v7', 'crate::mem::align_of::() as i32', '_v4f16'] @@ -12117,6 +12203,7 @@ intrinsics: - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vld4]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: unsafe: [neon] types: @@ -12145,6 +12232,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ld4]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: unsafe: [neon] types: @@ -12172,6 +12260,7 @@ intrinsics: - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vld4]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: unsafe: [neon] types: @@ -12201,6 +12290,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [ld4r]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: unsafe: [neon] types: @@ -12230,6 +12320,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ["2"]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: - "const LANE: i32" safety: @@ -12276,6 +12367,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ["2"]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec static_defs: - "const LANE: i32" safety: @@ -13674,6 +13766,7 @@ intrinsics: - *neon-v7 - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vst1.{type[4]}"']]}]] types: - ['_v4f16', '* const i8', float16x4_t, i32, '16'] @@ -13738,6 +13831,7 @@ intrinsics: - *neon-v7 - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, ['"vst1.{type[2]}"']]}]] types: - ['*mut f16', float16x4_t, '16', 'transmute(a)', 'crate::mem::align_of::() as i32', '_v4f16'] @@ -13918,6 +14012,7 @@ intrinsics: - *neon-v7 - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [nop] safety: safe types: @@ -13933,6 +14028,7 @@ intrinsics: - *neon-v7 - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec assert_instr: [nop] safety: safe types: @@ -13952,6 +14048,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [nop, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ["1"]] - *neon-unstable-f16 + - *target-not-arm64ec static_defs: ['const LANE: i32'] safety: 
safe types: @@ -13971,6 +14068,7 @@ intrinsics: - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, [dup]]}]] - *neon-fp16 - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - [float16x4_t, f16] @@ -14585,6 +14683,7 @@ intrinsics: - FnCall: [cfg_attr, [*test-is-arm, { FnCall: [assert_instr, ['vbsl']]}]] - FnCall: [cfg_attr, [*neon-target-aarch64-arm64ec, {FnCall: [assert_instr, ['bsl']]}]] - *neon-unstable-f16 + - *target-not-arm64ec safety: safe types: - ['vbslq_f16', 'uint16x8_t', 'float16x8_t', 'int16x8_t::splat(-1)'] diff --git a/library/stdarch/crates/stdarch-gen-arm/src/load_store_tests.rs b/library/stdarch/crates/stdarch-gen-arm/src/load_store_tests.rs index 3f3bfed132c76..0f4de83dacb4a 100644 --- a/library/stdarch/crates/stdarch-gen-arm/src/load_store_tests.rs +++ b/library/stdarch/crates/stdarch-gen-arm/src/load_store_tests.rs @@ -85,7 +85,7 @@ pub fn generate_load_store_tests( TokenStream::from_str(&PREAMBLE).map_err(|e| format!("Preamble is invalid: {e}"))?; // Only output manual tests for the SVE set let manual_tests = match &load_intrinsics[0].target_features[..] { - [s] if s == "sve" => TokenStream::from_str(&MANUAL_TESTS) + [s] if s == "sve" => TokenStream::from_str(MANUAL_TESTS) .map_err(|e| format!("Manual tests are invalid: {e}"))?, _ => quote!(), }; diff --git a/library/stdarch/crates/stdarch-test/src/disassembly.rs b/library/stdarch/crates/stdarch-test/src/disassembly.rs index f5167ea8d8ef3..4c136cff02ae6 100644 --- a/library/stdarch/crates/stdarch-test/src/disassembly.rs +++ b/library/stdarch/crates/stdarch-test/src/disassembly.rs @@ -27,9 +27,9 @@ fn normalize(mut symbol: &str) -> String { symbol = symbol[last_colon + 1..].to_string(); } - // Normalize to no leading underscore to handle platforms that may + // Normalize to no leading mangling chars to handle platforms that may // inject extra ones in symbol names. - while symbol.starts_with('_') || symbol.starts_with('.') { + while symbol.starts_with('_') || symbol.starts_with('.') || symbol.starts_with('#') { symbol.remove(0); } // Windows/x86 has a suffix such as @@4. @@ -49,6 +49,8 @@ pub(crate) fn disassemble_myself() -> HashSet { "i686-pc-windows-msvc" } else if cfg!(target_arch = "aarch64") { "aarch64-pc-windows-msvc" + } else if cfg!(target_arch = "arm64ec") { + "arm64ec-pc-windows-msvc" } else { panic!("disassembly unimplemented") }; diff --git a/library/stdarch/examples/hex.rs b/library/stdarch/examples/hex.rs index 95ffbaf666cea..621f55bc0951f 100644 --- a/library/stdarch/examples/hex.rs +++ b/library/stdarch/examples/hex.rs @@ -22,7 +22,6 @@ #![allow( clippy::unwrap_used, clippy::print_stdout, - clippy::unwrap_used, clippy::shadow_reuse, clippy::cast_possible_wrap, clippy::cast_ptr_alignment, diff --git a/src/bootstrap/src/core/builder/cargo.rs b/src/bootstrap/src/core/builder/cargo.rs index 6121bf7d9cd62..ee2bb710674c0 100644 --- a/src/bootstrap/src/core/builder/cargo.rs +++ b/src/bootstrap/src/core/builder/cargo.rs @@ -1227,6 +1227,11 @@ impl Builder<'_> { rustflags.arg("-Zehcont-guard"); } + // Optionally override the rc.exe when compiling rustc on Windows. + if let Some(windows_rc) = &self.config.windows_rc { + cargo.env("RUSTC_WINDOWS_RC", windows_rc); + } + // For `cargo doc` invocations, make rustdoc print the Rust version into the docs // This replaces spaces with tabs because RUSTDOCFLAGS does not // support arguments with regular spaces. 
Hopefully someday Cargo will diff --git a/src/bootstrap/src/core/config/config.rs b/src/bootstrap/src/core/config/config.rs index efb7ad9169971..a97399b3d4ae9 100644 --- a/src/bootstrap/src/core/config/config.rs +++ b/src/bootstrap/src/core/config/config.rs @@ -273,6 +273,7 @@ pub struct Config { pub gdb: Option, pub lldb: Option, pub python: Option, + pub windows_rc: Option, pub reuse: Option, pub cargo_native_static: bool, pub configure_args: Vec, @@ -450,6 +451,7 @@ impl Config { nodejs: build_nodejs, npm: build_npm, python: build_python, + windows_rc: build_windows_rc, reuse: build_reuse, locked_deps: build_locked_deps, vendor: build_vendor, @@ -1342,6 +1344,7 @@ impl Config { .unwrap_or(rust_debug == Some(true)), vendor, verbose_tests, + windows_rc: build_windows_rc.map(PathBuf::from), // tidy-alphabetical-end } } diff --git a/src/bootstrap/src/core/config/toml/build.rs b/src/bootstrap/src/core/config/toml/build.rs index 25c19f1070a39..a9d4d3961c9b7 100644 --- a/src/bootstrap/src/core/config/toml/build.rs +++ b/src/bootstrap/src/core/config/toml/build.rs @@ -37,6 +37,7 @@ define_config! { nodejs: Option = "nodejs", npm: Option = "npm", python: Option = "python", + windows_rc: Option = "windows-rc", reuse: Option = "reuse", locked_deps: Option = "locked-deps", vendor: Option = "vendor", diff --git a/src/bootstrap/src/utils/change_tracker.rs b/src/bootstrap/src/utils/change_tracker.rs index 2c48cebd2df05..6b187578c31ec 100644 --- a/src/bootstrap/src/utils/change_tracker.rs +++ b/src/bootstrap/src/utils/change_tracker.rs @@ -551,4 +551,9 @@ pub const CONFIG_CHANGE_HISTORY: &[ChangeInfo] = &[ severity: ChangeSeverity::Info, summary: "There is now a bootstrap option called `rust.parallel-frontend-threads`, which can be used to set the number of threads for the compiler frontend used during compilation of Rust code.", }, + ChangeInfo { + change_id: 146663, + severity: ChangeSeverity::Info, + summary: "New option `build.windows-rc` that will override which resource compiler on Windows will be used to compile Rust.", + }, ]; diff --git a/src/tools/compiletest/Cargo.toml b/src/tools/compiletest/Cargo.toml index cdada5a223062..6597c3c70f691 100644 --- a/src/tools/compiletest/Cargo.toml +++ b/src/tools/compiletest/Cargo.toml @@ -12,7 +12,7 @@ path = "src/bin/main.rs" [dependencies] # tidy-alphabetical-start -anstyle-svg = "0.1.3" +anstyle-svg = "0.1.11" build_helper = { path = "../../build_helper" } camino = "1" colored = "2" diff --git a/src/tools/tidy/src/lib.rs b/src/tools/tidy/src/lib.rs index 2a9c3963d2d86..f7920e3205ab2 100644 --- a/src/tools/tidy/src/lib.rs +++ b/src/tools/tidy/src/lib.rs @@ -237,7 +237,7 @@ pub fn ensure_version_or_cargo_install( if !cargo_exit_code.success() { return Err(io::Error::other("cargo install failed")); } - let bin_path = tool_bin_dir.join(bin_name); + let bin_path = tool_bin_dir.join(bin_name).with_extension(env::consts::EXE_EXTENSION); assert!( matches!(bin_path.try_exists(), Ok(true)), "cargo install did not produce the expected binary" diff --git a/tests/ui/codemap_tests/huge_multispan_highlight.ascii.svg b/tests/ui/codemap_tests/huge_multispan_highlight.ascii.svg index 1cedbf75e4bf6..7ffbc64b074bf 100644 --- a/tests/ui/codemap_tests/huge_multispan_highlight.ascii.svg +++ b/tests/ui/codemap_tests/huge_multispan_highlight.ascii.svg @@ -1,7 +1,7 @@ - +
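A note on the recurring "+ - *target-not-arm64ec" lines in the stdarch neon.spec.yml hunks above: they attach one more attribute anchor to every float16 intrinsic in the file. The anchor's definition sits near the top of the spec and is not shown in this diff; judging by its name and by the arm64ec changes elsewhere in the PR, it presumably expands to a cfg gate that keeps these f16 intrinsics out of arm64ec builds. A loose sketch of the assumed effect on one generated intrinsic (the attribute spelling and the vadd_f16 body are assumptions, not text from the PR):

    // Assumed expansion of the `target-not-arm64ec` anchor; the authoritative
    // definition lives at the top of neon.spec.yml and is not part of this hunk.
    #[cfg(not(target_arch = "arm64ec"))]
    #[inline]
    pub fn vadd_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
        // Illustrative body only; the spec composes the real one.
        unsafe { simd_add(a, b) }
    }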
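The vqadd/vqsub hunks above drop the per-architecture LLVMLink bindings (llvm.aarch64.neon.uqadd/sqadd, and llvm.uadd.sat/llvm.usub.sat on arm) in favour of the compiler's generic simd_saturating_add and simd_saturating_sub intrinsics, so one spec entry now serves aarch64, arm64ec and arm alike. Roughly, the regenerated bodies reduce to the sketch below; the target_feature, cfg, assert_instr and stability attributes driven by the spec are omitted:

    // Sketch of the regenerated intrinsic bodies inside core_arch.
    use crate::intrinsics::simd::{simd_saturating_add, simd_saturating_sub};

    #[inline]
    pub fn vqaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
        // LLVM still selects UQADD/VQADD; only the Rust-side plumbing changed.
        unsafe { simd_saturating_add(a, b) }
    }

    #[inline]
    pub fn vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
        unsafe { simd_saturating_sub(a, b) }
    }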
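Likewise, the two vrndn entries now carry a single llvm.roundeven link tagged aarch64,arm64ec,arm instead of a separate llvm.arm.neon.vrintn binding, on the understanding that LLVM lowers roundeven to FRINTN/VRINTN on all of these targets. A sketch of the collapsed extern block, with the mangled name expanded by hand from the "llvm.roundeven.{neon_type}" pattern (so treat the exact symbol as an assumption):

    // Single binding shared by aarch64, arm64ec and arm (sketch).
    unsafe extern "unadjusted" {
        #[link_name = "llvm.roundeven.v2f32"]
        fn _vrndn_f32(a: float32x2_t) -> float32x2_t;
    }

    #[inline]
    pub fn vrndn_f32(a: float32x2_t) -> float32x2_t {
        unsafe { _vrndn_f32(a) }
    }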
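In stdarch-test, normalize() now also strips a leading '#', which Arm64EC object files prepend to function symbols, and disassemble_myself() learns the arm64ec-pc-windows-msvc triple. For illustration, a standalone equivalent of the extended stripping logic (the real code mutates the symbol String in place inside a while loop):

    // Strip the mangling characters that various platforms prepend to symbols.
    fn strip_mangling_prefix(symbol: &str) -> &str {
        symbol.trim_start_matches(|c: char| c == '_' || c == '.' || c == '#')
    }

    #[test]
    fn strips_arm64ec_hash_prefix() {
        // Arm64EC exports carry a leading '#'.
        assert_eq!(strip_mangling_prefix("#vqaddq_u64"), "vqaddq_u64");
    }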
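On the bootstrap side, the new build.windows-rc option flows from bootstrap.toml through Config::windows_rc and is exported to the compiler build as the RUSTC_WINDOWS_RC environment variable. How rustc's own build consumes it is outside this diff; purely as an illustration of the kind of consumer the variable enables, a build script might honour the override roughly like this (hypothetical code with placeholder file names, not taken from the PR):

    // Hypothetical build.rs consumer of RUSTC_WINDOWS_RC (illustration only).
    use std::env;
    use std::process::Command;

    fn main() {
        println!("cargo:rerun-if-env-changed=RUSTC_WINDOWS_RC");
        // Fall back to the resource compiler on PATH when no override is set.
        let rc = env::var("RUSTC_WINDOWS_RC").unwrap_or_else(|_| "rc.exe".into());
        let status = Command::new(&rc)
            .args(["/nologo", "/fo", "app.res", "app.rc"]) // placeholder inputs
            .status()
            .expect("failed to spawn the Windows resource compiler");
        assert!(status.success(), "{rc} exited with {status}");
    }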
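Finally, the tidy change appends the platform executable extension before asserting that cargo install produced the binary; without it the check looks for bin_name on Windows while the installed file is bin_name.exe. The behaviour it relies on, shown in isolation:

    use std::env;
    use std::path::{Path, PathBuf};

    fn expected_bin_path(tool_bin_dir: &Path, bin_name: &str) -> PathBuf {
        // env::consts::EXE_EXTENSION is "exe" on Windows and "" elsewhere, so this
        // yields e.g. "bin/tidy.exe" on Windows and leaves "bin/tidy" as-is on Unix.
        tool_bin_dir.join(bin_name).with_extension(env::consts::EXE_EXTENSION)
    }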