Auto merge of #152639 - jhpratt:rollup-sIUYGho, r=jhpratt

Rollup of 9 pull requests

Successful merges:

 - rust-lang/rust#150424 (diagnostics: add note when param-env shadows global impl)
 - rust-lang/rust#152132 (implement `carryless_mul`)
 - rust-lang/rust#152508 (Improve write! and writeln! error when called without destination)
 - rust-lang/rust#152534 (Test(lib/win/net): Skip UDS tests when under Win7)
 - rust-lang/rust#152578 (ci: Lock cross toolchain version and update docs)
 - rust-lang/rust#152188 (Include `library/stdarch` for `CURRENT_RUSTC_VERSION` updates)
 - rust-lang/rust#152402 (Add regression test for rust-lang/rust#141738)
 - rust-lang/rust#152472 (unwind/wasm: fix compile error by wrapping wasm_throw in unsafe block)
 - rust-lang/rust#152610 (Exchange js_lint message between bless and non-bless)
This commit is contained in:
bors
2026-02-15 06:20:35 +00:00
59 changed files with 2278 additions and 1278 deletions
@@ -387,6 +387,27 @@ fn codegen_intrinsic_call(
let pair = self.insert_value(pair, high, 1);
pair
}
// FIXME move into the branch below when LLVM 22 is the lowest version we support.
sym::carryless_mul if crate::llvm_util::get_version() >= (22, 0, 0) => {
let ty = args[0].layout.ty;
if !ty.is_integral() {
tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
span,
name,
ty,
});
return Ok(());
}
let (size, _) = ty.int_size_and_signed(self.tcx);
let width = size.bits();
let llty = self.type_ix(width);
let lhs = args[0].immediate();
let rhs = args[1].immediate();
self.call_intrinsic("llvm.clmul", &[llty], &[lhs, rhs])
}
sym::ctlz
| sym::ctlz_nonzero
| sym::cttz
@@ -2784,6 +2805,7 @@ macro_rules! arith_unary {
| sym::simd_ctlz
| sym::simd_ctpop
| sym::simd_cttz
| sym::simd_carryless_mul
| sym::simd_funnel_shl
| sym::simd_funnel_shr
) {
@@ -2808,6 +2830,7 @@ macro_rules! arith_unary {
sym::simd_cttz => "llvm.cttz",
sym::simd_funnel_shl => "llvm.fshl",
sym::simd_funnel_shr => "llvm.fshr",
sym::simd_carryless_mul => "llvm.clmul",
_ => unreachable!(),
};
let int_size = in_elem.int_size_and_signed(bx.tcx()).0.bits();
@@ -2833,6 +2856,17 @@ macro_rules! arith_unary {
&[vec_ty],
&[args[0].immediate(), args[1].immediate(), args[2].immediate()],
)),
sym::simd_carryless_mul => {
if crate::llvm_util::get_version() >= (22, 0, 0) {
Ok(bx.call_intrinsic(
llvm_intrinsic,
&[vec_ty],
&[args[0].immediate(), args[1].immediate()],
))
} else {
span_bug!(span, "`simd_carryless_mul` needs LLVM 22 or higher");
}
}
_ => unreachable!(),
};
}
+8 -1
View File
@@ -354,7 +354,14 @@ fn target_config(&self, sess: &Session) -> TargetConfig {
}
fn replaced_intrinsics(&self) -> Vec<Symbol> {
vec![sym::unchecked_funnel_shl, sym::unchecked_funnel_shr, sym::carrying_mul_add]
let mut will_not_use_fallback =
vec![sym::unchecked_funnel_shl, sym::unchecked_funnel_shr, sym::carrying_mul_add];
if llvm_util::get_version() >= (22, 0, 0) {
will_not_use_fallback.push(sym::carryless_mul);
}
will_not_use_fallback
}
fn codegen_crate<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Box<dyn Any> {
@@ -82,6 +82,7 @@ fn intrinsic_operation_unsafety(tcx: TyCtxt<'_>, intrinsic_id: LocalDefId) -> hi
| sym::bswap
| sym::caller_location
| sym::carrying_mul_add
| sym::carryless_mul
| sym::ceilf16
| sym::ceilf32
| sym::ceilf64
@@ -564,6 +565,7 @@ pub(crate) fn check_intrinsic_type(
(1, 0, vec![param(0), param(0)], param(0))
}
sym::saturating_add | sym::saturating_sub => (1, 0, vec![param(0), param(0)], param(0)),
sym::carryless_mul => (1, 0, vec![param(0), param(0)], param(0)),
sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
(1, 0, vec![param(0), param(0)], param(0))
}
@@ -711,7 +713,8 @@ pub(crate) fn check_intrinsic_type(
| sym::simd_fmin
| sym::simd_fmax
| sym::simd_saturating_add
| sym::simd_saturating_sub => (1, 0, vec![param(0), param(0)], param(0)),
| sym::simd_saturating_sub
| sym::simd_carryless_mul => (1, 0, vec![param(0), param(0)], param(0)),
sym::simd_arith_offset => (2, 0, vec![param(0), param(1)], param(0)),
sym::simd_neg
| sym::simd_bswap
+2
View File
@@ -648,6 +648,7 @@
caller_location,
capture_disjoint_fields,
carrying_mul_add,
carryless_mul,
catch_unwind,
cause,
cdylib,
@@ -2093,6 +2094,7 @@
simd_bitmask,
simd_bitreverse,
simd_bswap,
simd_carryless_mul,
simd_cast,
simd_cast_ptr,
simd_ceil,
@@ -58,6 +58,7 @@
use rustc_hir::intravisit::Visitor;
use rustc_hir::lang_items::LangItem;
use rustc_hir::{self as hir};
use rustc_infer::infer::DefineOpaqueTypes;
use rustc_macros::extension;
use rustc_middle::bug;
use rustc_middle::dep_graph::DepContext;
@@ -72,12 +73,17 @@
use tracing::{debug, instrument};
use crate::error_reporting::TypeErrCtxt;
use crate::error_reporting::traits::ambiguity::{
CandidateSource, compute_applicable_impls_for_diagnostics,
};
use crate::errors::{ObligationCauseFailureCode, TypeErrorAdditionalDiags};
use crate::infer;
use crate::infer::relate::{self, RelateResult, TypeRelation};
use crate::infer::{InferCtxt, InferCtxtExt as _, TypeTrace, ValuePairs};
use crate::solve::deeply_normalize_for_diagnostics;
use crate::traits::{MatchExpressionArmCause, ObligationCause, ObligationCauseCode};
use crate::traits::{
MatchExpressionArmCause, Obligation, ObligationCause, ObligationCauseCode, specialization_graph,
};
mod note_and_explain;
mod suggest;
@@ -149,11 +155,15 @@ pub fn report_mismatched_types(
actual: Ty<'tcx>,
err: TypeError<'tcx>,
) -> Diag<'a> {
self.report_and_explain_type_error(
let mut diag = self.report_and_explain_type_error(
TypeTrace::types(cause, expected, actual),
param_env,
err,
)
);
self.suggest_param_env_shadowing(&mut diag, expected, actual, param_env);
diag
}
pub fn report_mismatched_consts(
@@ -240,6 +250,76 @@ fn check_and_note_conflicting_crates(&self, err: &mut Diag<'_>, terr: TypeError<
false
}
fn suggest_param_env_shadowing(
&self,
diag: &mut Diag<'_>,
expected: Ty<'tcx>,
found: Ty<'tcx>,
param_env: ty::ParamEnv<'tcx>,
) {
let (alias, concrete) = match (expected.kind(), found.kind()) {
(ty::Alias(ty::Projection, proj), _) => (proj, found),
(_, ty::Alias(ty::Projection, proj)) => (proj, expected),
_ => return,
};
let tcx = self.tcx;
let trait_ref = alias.trait_ref(tcx);
let obligation =
Obligation::new(tcx, ObligationCause::dummy(), param_env, ty::Binder::dummy(trait_ref));
let applicable_impls = compute_applicable_impls_for_diagnostics(self.infcx, &obligation);
for candidate in applicable_impls {
let impl_def_id = match candidate {
CandidateSource::DefId(did) => did,
CandidateSource::ParamEnv(_) => continue,
};
let is_shadowed = self.infcx.probe(|_| {
let impl_substs = self.infcx.fresh_args_for_item(DUMMY_SP, impl_def_id);
let impl_trait_ref = tcx.impl_trait_ref(impl_def_id).instantiate(tcx, impl_substs);
let expected_trait_ref = alias.trait_ref(tcx);
if let Err(_) = self.infcx.at(&ObligationCause::dummy(), param_env).eq(
DefineOpaqueTypes::No,
expected_trait_ref,
impl_trait_ref,
) {
return false;
}
let leaf_def = match specialization_graph::assoc_def(tcx, impl_def_id, alias.def_id)
{
Ok(leaf) => leaf,
Err(_) => return false,
};
let trait_def_id = alias.trait_def_id(tcx);
let rebased_args = alias.args.rebase_onto(tcx, trait_def_id, impl_substs);
let impl_item_def_id = leaf_def.item.def_id;
let impl_assoc_ty = tcx.type_of(impl_item_def_id).instantiate(tcx, rebased_args);
self.infcx.can_eq(param_env, impl_assoc_ty, concrete)
});
if is_shadowed {
diag.note(format!(
"the associated type `{}` is defined as `{}` in the implementation, \
but the where-bound `{}` shadows this definition\n\
see issue #152409 <https://github.com/rust-lang/rust/issues/152409> for more information",
self.ty_to_string(tcx.mk_ty_from_kind(ty::Alias(ty::Projection, *alias))),
self.ty_to_string(concrete),
self.ty_to_string(alias.self_ty())
));
return;
}
}
}
fn note_error_origin(
&self,
err: &mut Diag<'_>,
@@ -16,7 +16,7 @@
pub mod query;
#[allow(hidden_glob_reexports)]
mod select;
mod specialize;
pub mod specialize;
mod structural_normalize;
#[allow(hidden_glob_reexports)]
mod util;
+35
View File
@@ -218,3 +218,38 @@ unsafe fn unchecked_funnel_shr(self, rhs: Self, shift: u32) -> Self {
impl_funnel_shifts! {
u8, u16, u32, u64, u128, usize
}
/// Const-evaluable fallback for the `carryless_mul` intrinsic; the intrinsic's
/// body dispatches through this trait (see the intrinsic definition), so each
/// unsigned integer type gets a concrete implementation.
#[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
pub const trait CarrylessMul: Copy + 'static {
    /// See [`super::carryless_mul`]; we just need the trait indirection to handle
    /// different types since calling intrinsics with generics doesn't work.
    fn carryless_mul(self, rhs: Self) -> Self;
}
// Implements `CarrylessMul` for each listed unsigned type using the schoolbook
// shift-and-XOR algorithm (long multiplication with `^=` instead of `+=`).
macro_rules! impl_carryless_mul{
    ($($type:ident),*) => {$(
        #[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
        impl const CarrylessMul for $type {
            #[inline]
            fn carryless_mul(self, rhs: Self) -> Self {
                let mut result = 0;
                // A plain `while` loop: `for`/iterators are not usable in a
                // `const fn` body.
                let mut i = 0;
                while i < $type::BITS {
                    // If the i-th bit in rhs is set.
                    if (rhs >> i) & 1 != 0 {
                        // Then xor the result with `self` shifted to the left by i positions.
                        result ^= self << i;
                    }
                    i += 1;
                }
                result
            }
        }
    )*};
}

impl_carryless_mul! {
    u8, u16, u32, u64, u128, usize
}
+13
View File
@@ -2178,6 +2178,19 @@ pub const fn rotate_right<T: [const] fallback::FunnelShift>(x: T, shift: u32) ->
unsafe { a.unchecked_funnel_shr(b, shift) }
}
/// Carryless multiply.
///
/// Safe versions of this intrinsic are available on the integer primitives
/// via the `carryless_mul` method. For example, [`u32::carryless_mul`].
#[rustc_intrinsic]
#[rustc_nounwind]
#[rustc_const_unstable(feature = "uint_carryless_mul", issue = "152080")]
#[unstable(feature = "uint_carryless_mul", issue = "152080")]
#[miri::intrinsic_fallback_is_spec]
pub const fn carryless_mul<T: [const] fallback::CarrylessMul>(a: T, b: T) -> T {
    // Per `#[miri::intrinsic_fallback_is_spec]`, this fallback body is the
    // specification of the intrinsic; backends may replace it with a native
    // lowering.
    a.carryless_mul(b)
}
/// This is an implementation detail of [`crate::ptr::read`] and should
/// not be used anywhere else. See its comments for why this exists.
///
+12
View File
@@ -162,6 +162,18 @@
#[rustc_nounwind]
pub const unsafe fn simd_funnel_shr<T>(a: T, b: T, shift: T) -> T;
/// Compute the carry-less product.
///
/// This is similar to long multiplication except that the carry is discarded.
///
/// This operation can be used to model multiplication in `GF(2)[X]`, the polynomial
/// ring over `GF(2)`.
///
/// `T` must be a vector of integers.
///
/// # Safety
///
/// NOTE(review): declared `unsafe` with only the "`T` must be a vector of
/// integers" requirement stated above, matching the neighboring SIMD
/// intrinsics — confirm there are no further preconditions.
#[rustc_intrinsic]
#[rustc_nounwind]
pub unsafe fn simd_carryless_mul<T>(a: T, b: T) -> T;
/// "And"s vectors elementwise.
///
/// `T` must be a vector of integers.
+1
View File
@@ -170,6 +170,7 @@
#![feature(trait_alias)]
#![feature(transparent_unions)]
#![feature(try_blocks)]
#![feature(uint_carryless_mul)]
#![feature(unboxed_closures)]
#![feature(unsized_fn_params)]
#![feature(with_negative_coherence)]
+6
View File
@@ -607,6 +607,9 @@ macro_rules! write {
($dst:expr, $($arg:tt)*) => {
$dst.write_fmt($crate::format_args!($($arg)*))
};
($($arg:tt)*) => {
compile_error!("requires a destination and format arguments, like `write!(dest, \"format string\", args...)`")
};
}
/// Writes formatted data into a buffer, with a newline appended.
@@ -645,6 +648,9 @@ macro_rules! writeln {
($dst:expr, $($arg:tt)*) => {
$dst.write_fmt($crate::format_args_nl!($($arg)*))
};
($($arg:tt)*) => {
compile_error!("requires a destination and format arguments, like `writeln!(dest, \"format string\", args...)`")
};
}
/// Indicates unreachable code.
+137 -3
View File
@@ -244,6 +244,104 @@ pub const fn midpoint(self, rhs: $SelfT) -> $SelfT {
};
}
// Emits `widening_carryless_mul` for a type `$SelfT` whose double-width
// counterpart `$WideT` exists (everything except `u128`/`usize`).
macro_rules! widening_carryless_mul_impl {
    ($SelfT:ty, $WideT:ty) => {
        /// Performs a widening carry-less multiplication.
        ///
        /// # Examples
        ///
        /// ```
        /// #![feature(uint_carryless_mul)]
        ///
        #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.widening_carryless_mul(",
            stringify!($SelfT), "::MAX), ", stringify!($WideT), "::MAX / 3);")]
        /// ```
        #[rustc_const_unstable(feature = "uint_carryless_mul", issue = "152080")]
        #[doc(alias = "clmul")]
        #[unstable(feature = "uint_carryless_mul", issue = "152080")]
        #[must_use = "this returns the result of the operation, \
                      without modifying the original"]
        #[inline]
        pub const fn widening_carryless_mul(self, rhs: $SelfT) -> $WideT {
            // The carry-less product of two N-bit values fits in 2N bits, so
            // widening both operands and reusing the scalar operation is
            // lossless.
            (self as $WideT).carryless_mul(rhs as $WideT)
        }
    }
}
// Emits `carrying_carryless_mul`. `u128` gets a manual Karatsuba split over
// `u64` halves (there is no `u256` to widen into); every other width widens
// into the next-larger type. The shared doc-comment and attributes live in the
// `@internal` arm.
macro_rules! carrying_carryless_mul_impl {
    (u128, u256) => {
        carrying_carryless_mul_impl! { @internal u128 =>
            pub const fn carrying_carryless_mul(self, rhs: Self, carry: Self) -> (Self, Self) {
                // Split both operands into 64-bit halves.
                let x0 = self as u64;
                let x1 = (self >> 64) as u64;
                let y0 = rhs as u64;
                let y1 = (rhs >> 64) as u64;
                let z0 = u64::widening_carryless_mul(x0, y0);
                let z2 = u64::widening_carryless_mul(x1, y1);
                // The grade school algorithm would compute:
                // z1 = x0y1 ^ x1y0
                // Instead, Karatsuba first computes:
                let z3 = u64::widening_carryless_mul(x0 ^ x1, y0 ^ y1);
                // Since it distributes over XOR,
                // z3 == x0y0 ^ x0y1 ^ x1y0 ^ x1y1
                //       |--|   |---------|   |--|
                //    ==  z0  ^     z1      ^  z2
                // so we can compute z1 as
                let z1 = z3 ^ z0 ^ z2;
                let lo = z0 ^ (z1 << 64);
                let hi = z2 ^ (z1 >> 64);
                // XOR (not add) folds the carry in, so no carry-out can occur.
                (lo ^ carry, hi)
            }
        }
    };
    ($SelfT:ty, $WideT:ty) => {
        carrying_carryless_mul_impl! { @internal $SelfT =>
            pub const fn carrying_carryless_mul(self, rhs: Self, carry: Self) -> (Self, Self) {
                // Can't use widening_carryless_mul because it's not implemented for usize.
                let p = (self as $WideT).carryless_mul(rhs as $WideT);
                let lo = (p as $SelfT);
                let hi = (p >> Self::BITS) as $SelfT;
                (lo ^ carry, hi)
            }
        }
    };
    (@internal $SelfT:ty => $($fn:tt)*) => {
        /// Calculates the "full carryless multiplication" without the possibility to overflow.
        ///
        /// This returns the low-order (wrapping) bits and the high-order (overflow) bits
        /// of the result as two separate values, in that order.
        ///
        /// # Examples
        ///
        /// Please note that this example is shared among integer types, which is why `u8` is used.
        ///
        /// ```
        /// #![feature(uint_carryless_mul)]
        ///
        /// assert_eq!(0b1000_0000u8.carrying_carryless_mul(0b1000_0000, 0b0000), (0, 0b0100_0000));
        /// assert_eq!(0b1000_0000u8.carrying_carryless_mul(0b1000_0000, 0b1111), (0b1111, 0b0100_0000));
        #[doc = concat!("assert_eq!(",
            stringify!($SelfT), "::MAX.carrying_carryless_mul(", stringify!($SelfT), "::MAX, ", stringify!($SelfT), "::MAX), ",
            "(!(", stringify!($SelfT), "::MAX / 3), ", stringify!($SelfT), "::MAX / 3));"
        )]
        /// ```
        #[rustc_const_unstable(feature = "uint_carryless_mul", issue = "152080")]
        #[doc(alias = "clmul")]
        #[unstable(feature = "uint_carryless_mul", issue = "152080")]
        #[must_use = "this returns the result of the operation, \
                      without modifying the original"]
        #[inline]
        $($fn)*
    }
}
impl i8 {
int_impl! {
Self = i8,
@@ -458,6 +556,9 @@ impl u8 {
fsh_op = "0x36",
fshl_result = "0x8",
fshr_result = "0x8d",
clmul_lhs = "0x12",
clmul_rhs = "0x34",
clmul_result = "0x28",
swap_op = "0x12",
swapped = "0x12",
reversed = "0x48",
@@ -468,6 +569,8 @@ impl u8 {
bound_condition = "",
}
midpoint_impl! { u8, u16, unsigned }
widening_carryless_mul_impl! { u8, u16 }
carrying_carryless_mul_impl! { u8, u16 }
/// Checks if the value is within the ASCII range.
///
@@ -1095,6 +1198,9 @@ impl u16 {
fsh_op = "0x2de",
fshl_result = "0x30",
fshr_result = "0x302d",
clmul_lhs = "0x9012",
clmul_rhs = "0xcd34",
clmul_result = "0x928",
swap_op = "0x1234",
swapped = "0x3412",
reversed = "0x2c48",
@@ -1105,6 +1211,8 @@ impl u16 {
bound_condition = "",
}
midpoint_impl! { u16, u32, unsigned }
widening_carryless_mul_impl! { u16, u32 }
carrying_carryless_mul_impl! { u16, u32 }
/// Checks if the value is a Unicode surrogate code point, which are disallowed values for [`char`].
///
@@ -1145,6 +1253,9 @@ impl u32 {
fsh_op = "0x2fe78e45",
fshl_result = "0xb32f",
fshr_result = "0xb32fe78e",
clmul_lhs = "0x56789012",
clmul_rhs = "0xf52ecd34",
clmul_result = "0x9b980928",
swap_op = "0x12345678",
swapped = "0x78563412",
reversed = "0x1e6a2c48",
@@ -1155,6 +1266,8 @@ impl u32 {
bound_condition = "",
}
midpoint_impl! { u32, u64, unsigned }
widening_carryless_mul_impl! { u32, u64 }
carrying_carryless_mul_impl! { u32, u64 }
}
impl u64 {
@@ -1171,6 +1284,9 @@ impl u64 {
fsh_op = "0x2fe78e45983acd98",
fshl_result = "0x6e12fe",
fshr_result = "0x6e12fe78e45983ac",
clmul_lhs = "0x7890123456789012",
clmul_rhs = "0xdd358416f52ecd34",
clmul_result = "0xa6299579b980928",
swap_op = "0x1234567890123456",
swapped = "0x5634129078563412",
reversed = "0x6a2c48091e6a2c48",
@@ -1181,6 +1297,8 @@ impl u64 {
bound_condition = "",
}
midpoint_impl! { u64, u128, unsigned }
widening_carryless_mul_impl! { u64, u128 }
carrying_carryless_mul_impl! { u64, u128 }
}
impl u128 {
@@ -1197,6 +1315,9 @@ impl u128 {
fsh_op = "0x2fe78e45983acd98039000008736273",
fshl_result = "0x4f7602fe",
fshr_result = "0x4f7602fe78e45983acd9803900000873",
clmul_lhs = "0x12345678901234567890123456789012",
clmul_rhs = "0x4317e40ab4ddcf05dd358416f52ecd34",
clmul_result = "0xb9cf660de35d0c170a6299579b980928",
swap_op = "0x12345678901234567890123456789012",
swapped = "0x12907856341290785634129078563412",
reversed = "0x48091e6a2c48091e6a2c48091e6a2c48",
@@ -1209,6 +1330,7 @@ impl u128 {
bound_condition = "",
}
midpoint_impl! { u128, unsigned }
carrying_carryless_mul_impl! { u128, u256 }
}
#[cfg(target_pointer_width = "16")]
@@ -1223,9 +1345,12 @@ impl usize {
rot = 4,
rot_op = "0xa003",
rot_result = "0x3a",
fsh_op = "0x2fe78e45983acd98039000008736273",
fshl_result = "0x4f7602fe",
fshr_result = "0x4f7602fe78e45983acd9803900000873",
fsh_op = "0x2de",
fshl_result = "0x30",
fshr_result = "0x302d",
clmul_lhs = "0x9012",
clmul_rhs = "0xcd34",
clmul_result = "0x928",
swap_op = "0x1234",
swapped = "0x3412",
reversed = "0x2c48",
@@ -1236,6 +1361,7 @@ impl usize {
bound_condition = " on 16-bit targets",
}
midpoint_impl! { usize, u32, unsigned }
carrying_carryless_mul_impl! { usize, u32 }
}
#[cfg(target_pointer_width = "32")]
@@ -1253,6 +1379,9 @@ impl usize {
fsh_op = "0x2fe78e45",
fshl_result = "0xb32f",
fshr_result = "0xb32fe78e",
clmul_lhs = "0x56789012",
clmul_rhs = "0xf52ecd34",
clmul_result = "0x9b980928",
swap_op = "0x12345678",
swapped = "0x78563412",
reversed = "0x1e6a2c48",
@@ -1263,6 +1392,7 @@ impl usize {
bound_condition = " on 32-bit targets",
}
midpoint_impl! { usize, u64, unsigned }
carrying_carryless_mul_impl! { usize, u64 }
}
#[cfg(target_pointer_width = "64")]
@@ -1280,6 +1410,9 @@ impl usize {
fsh_op = "0x2fe78e45983acd98",
fshl_result = "0x6e12fe",
fshr_result = "0x6e12fe78e45983ac",
clmul_lhs = "0x7890123456789012",
clmul_rhs = "0xdd358416f52ecd34",
clmul_result = "0xa6299579b980928",
swap_op = "0x1234567890123456",
swapped = "0x5634129078563412",
reversed = "0x6a2c48091e6a2c48",
@@ -1290,6 +1423,7 @@ impl usize {
bound_condition = " on 64-bit targets",
}
midpoint_impl! { usize, u128, unsigned }
carrying_carryless_mul_impl! { usize, u128 }
}
impl usize {
+59
View File
@@ -17,6 +17,9 @@ macro_rules! uint_impl {
fsh_op = $fsh_op:literal,
fshl_result = $fshl_result:literal,
fshr_result = $fshr_result:literal,
// The binder names must match the parameter keys: `clmul_lhs = $clmul_lhs`
// etc. Previously they were crossed (`clmul_lhs = $clmul_rhs:literal`), which
// swapped `a` and `b` in every generated doc example — invisible in the
// doctest results only because carry-less multiplication is commutative.
clmul_lhs = $clmul_lhs:literal,
clmul_rhs = $clmul_rhs:literal,
clmul_result = $clmul_result:literal,
swap_op = $swap_op:literal,
swapped = $swapped:literal,
reversed = $reversed:literal,
@@ -482,6 +485,62 @@ pub const fn funnel_shr(self, rhs: Self, n: u32) -> Self {
unsafe { intrinsics::unchecked_funnel_shr(self, rhs, n) }
}
/// Performs a carry-less multiplication, returning the lower bits.
///
/// This operation is similar to long multiplication, except that exclusive or is used
/// instead of addition. The implementation is equivalent to:
///
/// ```no_run
#[doc = concat!("pub fn carryless_mul(lhs: ", stringify!($SelfT), ", rhs: ", stringify!($SelfT), ") -> ", stringify!($SelfT), "{")]
/// let mut retval = 0;
#[doc = concat!(" for i in 0..", stringify!($SelfT), "::BITS {")]
/// if (rhs >> i) & 1 != 0 {
/// // long multiplication would use +=
/// retval ^= lhs << i;
/// }
/// }
/// retval
/// }
/// ```
///
/// The actual implementation is more efficient, and on some platforms lowers directly to a
/// dedicated instruction.
///
/// # Uses
///
/// Carryless multiplication can be used to turn a bitmask of quote characters into a
/// bit mask of characters surrounded by quotes:
///
/// ```no_run
/// r#"abc xxx "foobar" zzz "a"!"#; // input string
/// 0b0000000010000001000001010; // quote_mask
/// 0b0000000001111110000000100; // quote_mask.carryless_mul(!0) & !quote_mask
/// ```
///
/// Another use is in cryptography, where carryless multiplication allows for efficient
/// implementations of polynomial multiplication in `GF(2)[X]`, the polynomial ring
/// over `GF(2)`.
///
/// # Examples
///
/// ```
/// #![feature(uint_carryless_mul)]
///
#[doc = concat!("let a = ", $clmul_lhs, stringify!($SelfT), ";")]
#[doc = concat!("let b = ", $clmul_rhs, stringify!($SelfT), ";")]
///
#[doc = concat!("assert_eq!(a.carryless_mul(b), ", $clmul_result, ");")]
/// ```
#[rustc_const_unstable(feature = "uint_carryless_mul", issue = "152080")]
#[doc(alias = "clmul")]
#[unstable(feature = "uint_carryless_mul", issue = "152080")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline(always)]
pub const fn carryless_mul(self, rhs: Self) -> Self {
intrinsics::carryless_mul(self, rhs)
}
/// Reverses the byte order of the integer.
///
/// # Examples
+1
View File
@@ -116,6 +116,7 @@
#![feature(try_trait_v2)]
#![feature(type_info)]
#![feature(uint_bit_width)]
#![feature(uint_carryless_mul)]
#![feature(uint_gather_scatter_bits)]
#![feature(unsize)]
#![feature(unwrap_infallible)]
@@ -0,0 +1,254 @@
//! Tests the `Unsigned::{carryless_mul, widening_carryless_mul, carrying_carryless_mul}` methods.
#[test]
fn carryless_mul_u128() {
assert_eq_const_safe!(u128: <u128>::carryless_mul(0, 0), 0);
assert_eq_const_safe!(u128: <u128>::carryless_mul(1, 1), 1);
assert_eq_const_safe!(
u128: <u128>::carryless_mul(
0x0123456789ABCDEF_FEDCBA9876543210,
1u128 << 64,
),
0xFEDCBA9876543210_0000000000000000
);
assert_eq_const_safe!(
u128: <u128>::carryless_mul(
0x0123456789ABCDEF_FEDCBA9876543210,
(1u128 << 64) | 1,
),
0xFFFFFFFFFFFFFFFF_FEDCBA9876543210
);
assert_eq_const_safe!(
u128: <u128>::carryless_mul(
0x0123456789ABCDEF_FEDCBA9876543211,
1u128 << 127,
),
0x8000000000000000_0000000000000000
);
assert_eq_const_safe!(
u128: <u128>::carryless_mul(
0xAAAAAAAAAAAAAAAA_AAAAAAAAAAAAAAAA,
0x5555555555555555_5555555555555555,
),
0x2222222222222222_2222222222222222
);
assert_eq_const_safe!(
u128: <u128>::carryless_mul(
(1 << 127) | (1 << 64) | 1,
(1 << 63) | 1
),
(1 << 64) | (1 << 63) | 1
);
assert_eq_const_safe!(
u128: <u128>::carryless_mul(
0x8000000000000000_0000000000000001,
0x7FFFFFFFFFFFFFFF_FFFFFFFFFFFFFFFF,
),
0xFFFFFFFFFFFFFFFF_FFFFFFFFFFFFFFFF
);
}
#[test]
fn carryless_mul_u64() {
assert_eq_const_safe!(u64: <u64>::carryless_mul(0, 0), 0);
assert_eq_const_safe!(u64: <u64>::carryless_mul(1, 1), 1);
assert_eq_const_safe!(
u64: <u64>::carryless_mul(
0x0123_4567_89AB_CDEF,
1u64 << 32,
),
0x89AB_CDEF_0000_0000
);
assert_eq_const_safe!(
u64: <u64>::carryless_mul(
0x0123_4567_89AB_CDEF,
(1u64 << 32) | 1,
),
0x8888_8888_89AB_CDEF
);
assert_eq_const_safe!(
u64: <u64>::carryless_mul(
0x0123_4567_89AB_CDEF,
1u64 << 63,
),
0x8000_0000_0000_0000
);
assert_eq_const_safe!(
u64: <u64>::carryless_mul(
0xAAAA_AAAA_AAAA_AAAA,
0x5555_5555_5555_5555,
),
0x2222_2222_2222_2222
);
assert_eq_const_safe!(
u64: <u64>::carryless_mul(
(1u64 << 63) | (1u64 << 32) | 1,
(1u64 << 31) | 1,
),
(1u64 << 32) | (1u64 << 31) | 1
);
assert_eq_const_safe!(
u64: <u64>::carryless_mul(
0x8000_0000_0000_0001,
0x7FFF_FFFF_FFFF_FFFF,
),
0xFFFF_FFFF_FFFF_FFFF
);
}
/// Spot-checks `u32::carryless_mul`: a power-of-two operand degenerates to a
/// plain left shift, and the alternating-bit pair 0xAAAA_AAAA/0x5555_5555
/// exercises the XOR folding.
#[test]
fn carryless_mul_u32() {
    assert_eq_const_safe!(
        u32: <u32>::carryless_mul(0x0123_4567, 1u32 << 16),
        0x4567_0000
    );
    assert_eq_const_safe!(
        u32: <u32>::carryless_mul(0xAAAA_AAAA, 0x5555_5555),
        0x2222_2222
    );
}
/// Spot-checks `u16::carryless_mul`: a power-of-two operand degenerates to a
/// plain (truncating) left shift, and the alternating-bit pair exercises the
/// XOR folding.
#[test]
fn carryless_mul_u16() {
    assert_eq_const_safe!(
        u16: <u16>::carryless_mul(0x0123, 1u16 << 8),
        0x2300
    );
    assert_eq_const_safe!(
        u16: <u16>::carryless_mul(0xAAAA, 0x5555),
        0x2222
    );
}
/// Spot-checks `u8::carryless_mul`: a power-of-two operand degenerates to a
/// plain left shift, and the alternating-bit pair exercises the XOR folding.
#[test]
fn carryless_mul_u8() {
    assert_eq_const_safe!(
        u8: <u8>::carryless_mul(0x01, 1u8 << 4),
        0x10
    );
    assert_eq_const_safe!(
        u8: <u8>::carryless_mul(0xAA, 0x55),
        0x22
    );
}
#[test]
fn widening_carryless_mul() {
assert_eq_const_safe!(
u16: <u8>::widening_carryless_mul(0xEFu8, 1u8 << 7),
0x7780u16
);
assert_eq_const_safe!(
u16: <u8>::widening_carryless_mul(0xEFu8, (1u8 << 7) | 1),
0x776Fu16
);
assert_eq_const_safe!(
u32: <u16>::widening_carryless_mul(0xBEEFu16, 1u16 << 15),
0x5F77_8000u32
);
assert_eq_const_safe!(
u32: <u16>::widening_carryless_mul(0xBEEFu16, (1u16 << 15) | 1),
0x5F77_3EEFu32
);
assert_eq_const_safe!(
u64: <u32>::widening_carryless_mul(0xDEAD_BEEFu32, 1u32 << 31),
0x6F56_DF77_8000_0000u64
);
assert_eq_const_safe!(
u64: <u32>::widening_carryless_mul(0xDEAD_BEEFu32, (1u32 << 31) | 1),
0x6F56_DF77_5EAD_BEEFu64
);
assert_eq_const_safe!(
u128: <u64>::widening_carryless_mul(0xDEAD_BEEF_FACE_FEEDu64, 1u64 << 63),
147995377545877439359040026616086396928
);
assert_eq_const_safe!(
u128: <u64>::widening_carryless_mul(0xDEAD_BEEF_FACE_FEEDu64, (1u64 << 63) | 1),
147995377545877439356638973527682121453
);
}
#[test]
fn carrying_carryless_mul() {
assert_eq_const_safe!(
(u8, u8): <u8>::carrying_carryless_mul(0xEFu8, 1u8 << 7, 0),
(0x80u8, 0x77u8)
);
assert_eq_const_safe!(
(u8, u8): <u8>::carrying_carryless_mul(0xEFu8, (1u8 << 7) | 1, 0xEF),
(0x80u8, 0x77u8)
);
assert_eq_const_safe!(
(u16, u16): <u16>::carrying_carryless_mul(0xBEEFu16, 1u16 << 15, 0),
(0x8000u16, 0x5F77u16)
);
assert_eq_const_safe!(
(u16, u16): <u16>::carrying_carryless_mul(0xBEEFu16, (1u16 << 15) | 1, 0xBEEF),
(0x8000u16, 0x5F77u16)
);
assert_eq_const_safe!(
(u32, u32): <u32>::carrying_carryless_mul(0xDEAD_BEEFu32, 1u32 << 31, 0),
(0x8000_0000u32, 0x6F56_DF77u32)
);
assert_eq_const_safe!(
(u32, u32): <u32>::carrying_carryless_mul(0xDEAD_BEEFu32, (1u32 << 31) | 1, 0xDEAD_BEEF),
(0x8000_0000u32, 0x6F56_DF77u32)
);
assert_eq_const_safe!(
(u64, u64): <u64>::carrying_carryless_mul(0xDEAD_BEEF_FACE_FEEDu64, 1u64 << 63, 0),
(9223372036854775808, 8022845492652638070)
);
assert_eq_const_safe!(
(u64, u64): <u64>::carrying_carryless_mul(
0xDEAD_BEEF_FACE_FEEDu64,
(1u64 << 63) | 1,
0xDEAD_BEEF_FACE_FEED,
),
(9223372036854775808, 8022845492652638070)
);
assert_eq_const_safe!(
(u128, u128): <u128>::carrying_carryless_mul(
0xDEAD_BEEF_FACE_FEED_0123_4567_89AB_CDEFu128,
1u128 << 127,
0,
),
(
0x8000_0000_0000_0000_0000_0000_0000_0000u128,
147995377545877439359081019380694640375,
)
);
assert_eq_const_safe!(
(u128, u128): <u128>::carrying_carryless_mul(
0xDEAD_BEEF_FACE_FEED_0123_4567_89AB_CDEFu128,
(1u128 << 127) | 1,
0xDEAD_BEEF_FACE_FEED_0123_4567_89AB_CDEF,
),
(
0x8000_0000_0000_0000_0000_0000_0000_0000u128,
147995377545877439359081019380694640375,
)
);
}
+1
View File
@@ -22,6 +22,7 @@
mod u8;
mod bignum;
mod carryless_mul;
mod const_from;
mod dec2flt;
mod float_iter_sum_identity;
@@ -117,6 +117,13 @@ fn test_funnel_shift() {
assert_eq_const_safe!($T: <$T>::funnel_shr(_1, _1, 4), <$T>::rotate_right(_1, 4));
}
fn test_carryless_mul() {
assert_eq_const_safe!($T: <$T>::carryless_mul(0, 0), 0);
assert_eq_const_safe!($T: <$T>::carryless_mul(1, 1), 1);
assert_eq_const_safe!($T: <$T>::carryless_mul(0b0100, 2), 0b1000);
}
fn test_swap_bytes() {
assert_eq_const_safe!($T: A.swap_bytes().swap_bytes(), A);
assert_eq_const_safe!($T: B.swap_bytes().swap_bytes(), B);
+1
View File
@@ -315,6 +315,7 @@
#![feature(try_blocks)]
#![feature(try_trait_v2)]
#![feature(type_alias_impl_trait)]
#![feature(uint_carryless_mul)]
// tidy-alphabetical-end
//
// Library features (core):
@@ -12,6 +12,8 @@
/// A structure representing a Unix domain socket server.
///
/// Under Windows, it will only work starting from Windows 10 17063.
///
/// # Examples
///
/// ```no_run
+2
View File
@@ -17,6 +17,8 @@
use crate::{fmt, io};
/// A Unix stream socket.
///
/// Under Windows, it will only work starting from Windows 10 17063.
///
/// # Examples
///
/// ```no_run
+134 -1
View File
@@ -5,10 +5,24 @@
// in the future, will test both unix and windows uds
use std::io::{Read, Write};
use std::os::windows::net::{UnixListener, UnixStream};
use std::thread;
use std::{mem, thread};
// Expands to an early `return` when the host Windows predates Unix-domain
// socket support, so this must be invoked at the top of a `#[test]` fn body.
macro_rules! skip_nonapplicable_oses {
    () => {
        // UDS have been available under Windows since Insider Preview Build
        // 17063. "Redstone 4" (RS4, version 1803, build number 17134) is
        // therefore the first official release to include it.
        if !is_windows_10_v1803_or_greater() {
            println!("Not running this test on too-old Windows.");
            return;
        }
    };
}
#[test]
fn win_uds_smoke_bind_connect() {
skip_nonapplicable_oses!();
let tmp = std::env::temp_dir();
let sock_path = tmp.join("rust-test-uds-smoke.sock");
let _ = std::fs::remove_file(&sock_path);
@@ -32,6 +46,8 @@ fn win_uds_smoke_bind_connect() {
#[test]
fn win_uds_echo() {
skip_nonapplicable_oses!();
let tmp = std::env::temp_dir();
let sock_path = tmp.join("rust-test-uds-echo.sock");
let _ = std::fs::remove_file(&sock_path);
@@ -68,14 +84,19 @@ fn win_uds_echo() {
#[test]
fn win_uds_path_too_long() {
skip_nonapplicable_oses!();
let tmp = std::env::temp_dir();
let long_path = tmp.join("a".repeat(200));
let result = UnixListener::bind(&long_path);
assert!(result.is_err());
let _ = std::fs::remove_file(&long_path);
}
#[test]
fn win_uds_existing_bind() {
skip_nonapplicable_oses!();
let tmp = std::env::temp_dir();
let sock_path = tmp.join("rust-test-uds-existing.sock");
let _ = std::fs::remove_file(&sock_path);
@@ -85,3 +106,115 @@ fn win_uds_existing_bind() {
drop(listener);
let _ = std::fs::remove_file(&sock_path);
}
/// Returns true if we are currently running on Windows 10 v1803 (RS4) or greater.
fn is_windows_10_v1803_or_greater() -> bool {
    is_windows_version_greater_or_equal(NTDDI_WIN10_RS4)
}

/// Returns true if we are currently running on the given version of Windows
/// 10 (or newer).
///
/// `min_version` is an `NTDDI_*`-style constant; only its top 16 bits (the
/// major/minor OS version, extracted via `OSVER`) take part in the comparison —
/// service pack and build number are passed as 0.
fn is_windows_version_greater_or_equal(min_version: u32) -> bool {
    is_windows_version_or_greater(HIBYTE(OSVER(min_version)), LOBYTE(OSVER(min_version)), 0, 0)
}
/// Checks if we are running a version of Windows newer than the specified one.
///
/// Fills an `OSVERSIONINFOEXW` with the minimum acceptable values and asks the
/// OS (via `RtlVerifyVersionInfo`) whether the running system satisfies a
/// `>=` comparison on each field.
fn is_windows_version_or_greater(
    major: u8,
    minor: u8,
    service_pack: u8,
    build_number: u32,
) -> bool {
    let mut osvi = OSVERSIONINFOEXW {
        // The OS validates this size field; it must be set before the call.
        dwOSVersionInfoSize: mem::size_of::<OSVERSIONINFOEXW>() as _,
        dwMajorVersion: u32::from(major),
        dwMinorVersion: u32::from(minor),
        wServicePackMajor: u16::from(service_pack),
        dwBuildNumber: build_number,
        ..OSVERSIONINFOEXW::default()
    };
    // Build up a condition mask that requests `>=` for each field, one nested
    // VerSetConditionMask call per field.
    // SAFETY: this function is always safe to call.
    let condmask = unsafe {
        VerSetConditionMask(
            VerSetConditionMask(
                VerSetConditionMask(
                    VerSetConditionMask(0, VER_MAJORVERSION, VER_GREATER_EQUAL as _),
                    VER_MINORVERSION,
                    VER_GREATER_EQUAL as _,
                ),
                VER_SERVICEPACKMAJOR,
                VER_GREATER_EQUAL as _,
            ),
            VER_BUILDNUMBER,
            VER_GREATER_EQUAL as _,
        )
    };
    // NOTE(review): the typemask below omits VER_BUILDNUMBER even though the
    // condition mask sets it, so `build_number` does not take part in the
    // comparison — confirm this is intended (the callers above pass 0).
    // SAFETY: osvi needs to point to a memory region valid for at least
    // dwOSVersionInfoSize bytes, which is the case here.
    (unsafe {
        RtlVerifyVersionInfo(
            &raw mut osvi,
            VER_MAJORVERSION | VER_MINORVERSION | VER_SERVICEPACKMAJOR,
            condmask,
        )
    }) == STATUS_SUCCESS
}
/// Extracts the high-order byte of a 16-bit value (Win32 `HIBYTE` macro).
#[expect(non_snake_case)]
const fn HIBYTE(x: u16) -> u8 {
    // In the big-endian encoding the high byte comes first.
    x.to_be_bytes()[0]
}
/// Extracts the low-order byte of a 16-bit value (Win32 `LOBYTE` macro).
#[expect(non_snake_case)]
const fn LOBYTE(x: u16) -> u8 {
    // A truncating cast keeps exactly the low 8 bits.
    x as u8
}
/// Extracts the 16-bit OS version from the top half of an `NTDDI_*` constant
/// (Win32 `OSVER` macro).
#[expect(non_snake_case)]
const fn OSVER(x: u32) -> u16 {
    // Shifting right by 16 and truncating keeps exactly the top 16 bits —
    // identical to masking with OSVERSION_MASK (0xFFFF0000) before shifting.
    (x >> 16) as u16
}
// Inlined bindings because outside of `std` here.
type NTSTATUS = i32;
const STATUS_SUCCESS: NTSTATUS = 0;
#[expect(non_camel_case_types)]
type VER_FLAGS = u32;
const VER_BUILDNUMBER: VER_FLAGS = 4u32;
const VER_GREATER_EQUAL: VER_FLAGS = 3u32;
const VER_MAJORVERSION: VER_FLAGS = 2u32;
const VER_MINORVERSION: VER_FLAGS = 1u32;
const VER_SERVICEPACKMAJOR: VER_FLAGS = 32u32;
const OSVERSION_MASK: u32 = 4294901760u32;
const NTDDI_WIN10_RS4: u32 = 167772165u32;
/// Inlined mirror of the Win32 `OSVERSIONINFOEXW` structure; `#[repr(C)]`
/// because it is passed by pointer to `RtlVerifyVersionInfo`, so the field
/// layout must match the OS definition exactly.
#[expect(non_snake_case)]
#[repr(C)]
#[derive(Clone, Copy)]
struct OSVERSIONINFOEXW {
    // Must be set to size_of::<Self>() before handing the struct to the OS.
    pub dwOSVersionInfoSize: u32,
    pub dwMajorVersion: u32,
    pub dwMinorVersion: u32,
    pub dwBuildNumber: u32,
    pub dwPlatformId: u32,
    // Wide-character (UTF-16) service-pack description buffer.
    pub szCSDVersion: [u16; 128],
    pub wServicePackMajor: u16,
    pub wServicePackMinor: u16,
    pub wSuiteMask: u16,
    pub wProductType: u8,
    pub wReserved: u8,
}
impl Default for OSVERSIONINFOEXW {
    /// Produces an all-zero value, matching the C convention of
    /// zero-initializing this struct before filling in specific fields.
    fn default() -> Self {
        // SAFETY: every field is a plain integer or an array of integers, for
        // which the all-zero bit pattern is a valid value.
        unsafe { core::mem::zeroed() }
    }
}
windows_link::link!("ntdll.dll" "system" fn RtlVerifyVersionInfo(versioninfo : *const OSVERSIONINFOEXW, typemask : u32, conditionmask : u64) -> NTSTATUS);
windows_link::link!("kernel32.dll" "system" fn VerSetConditionMask(conditionmask : u64, typemask : VER_FLAGS, condition : u8) -> u64);
@@ -1565,7 +1565,7 @@ pub fn vceqh_f16(a: f16, b: f16) -> u16 {
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmeq))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vceqz_f16(a: float16x4_t) -> uint16x4_t {
let b: f16x4 = f16x4::new(0.0, 0.0, 0.0, 0.0);
@@ -1576,7 +1576,7 @@ pub fn vceqz_f16(a: float16x4_t) -> uint16x4_t {
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmeq))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vceqzq_f16(a: float16x8_t) -> uint16x8_t {
let b: f16x8 = f16x8::new(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0);
@@ -7283,7 +7283,7 @@ pub fn vcvtq_f64_u64(a: uint64x2_t) -> float64x2_t {
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn2))]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvt_high_f16_f32(a: float16x4_t, b: float32x4_t) -> float16x8_t {
vcombine_f16(a, vcvt_f16_f32(b))
@@ -7293,7 +7293,7 @@ pub fn vcvt_high_f16_f32(a: float16x4_t, b: float32x4_t) -> float16x8_t {
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl2))]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvt_high_f32_f16(a: float16x8_t) -> float32x4_t {
vcvt_f32_f16(vget_high_f16(a))
@@ -7532,7 +7532,7 @@ pub fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t {
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvta_s16_f16(a: float16x4_t) -> int16x4_t {
unsafe extern "unadjusted" {
@@ -7549,7 +7549,7 @@ pub fn vcvta_s16_f16(a: float16x4_t) -> int16x4_t {
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtaq_s16_f16(a: float16x8_t) -> int16x8_t {
unsafe extern "unadjusted" {
@@ -7630,7 +7630,7 @@ pub fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t {
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvta_u16_f16(a: float16x4_t) -> uint16x4_t {
unsafe extern "unadjusted" {
@@ -7647,7 +7647,7 @@ pub fn vcvta_u16_f16(a: float16x4_t) -> uint16x4_t {
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtaq_u16_f16(a: float16x8_t) -> uint16x8_t {
unsafe extern "unadjusted" {
@@ -8218,7 +8218,7 @@ pub fn vcvth_u64_f16(a: f16) -> u64 {
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtm_s16_f16(a: float16x4_t) -> int16x4_t {
unsafe extern "unadjusted" {
@@ -8235,7 +8235,7 @@ pub fn vcvtm_s16_f16(a: float16x4_t) -> int16x4_t {
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmq_s16_f16(a: float16x8_t) -> int16x8_t {
unsafe extern "unadjusted" {
@@ -8316,7 +8316,7 @@ pub fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t {
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtm_u16_f16(a: float16x4_t) -> uint16x4_t {
unsafe extern "unadjusted" {
@@ -8333,7 +8333,7 @@ pub fn vcvtm_u16_f16(a: float16x4_t) -> uint16x4_t {
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmq_u16_f16(a: float16x8_t) -> uint16x8_t {
unsafe extern "unadjusted" {
@@ -8566,7 +8566,7 @@ pub fn vcvtmd_u64_f64(a: f64) -> u64 {
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtn_s16_f16(a: float16x4_t) -> int16x4_t {
unsafe extern "unadjusted" {
@@ -8583,7 +8583,7 @@ pub fn vcvtn_s16_f16(a: float16x4_t) -> int16x4_t {
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnq_s16_f16(a: float16x8_t) -> int16x8_t {
unsafe extern "unadjusted" {
@@ -8664,7 +8664,7 @@ pub fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t {
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtn_u16_f16(a: float16x4_t) -> uint16x4_t {
unsafe extern "unadjusted" {
@@ -8681,7 +8681,7 @@ pub fn vcvtn_u16_f16(a: float16x4_t) -> uint16x4_t {
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnq_u16_f16(a: float16x8_t) -> uint16x8_t {
unsafe extern "unadjusted" {
@@ -8914,7 +8914,7 @@ pub fn vcvtnd_u64_f64(a: f64) -> u64 {
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtp_s16_f16(a: float16x4_t) -> int16x4_t {
unsafe extern "unadjusted" {
@@ -8931,7 +8931,7 @@ pub fn vcvtp_s16_f16(a: float16x4_t) -> int16x4_t {
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtpq_s16_f16(a: float16x8_t) -> int16x8_t {
unsafe extern "unadjusted" {
@@ -9012,7 +9012,7 @@ pub fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t {
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtp_u16_f16(a: float16x4_t) -> uint16x4_t {
unsafe extern "unadjusted" {
@@ -9029,7 +9029,7 @@ pub fn vcvtp_u16_f16(a: float16x4_t) -> uint16x4_t {
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtpq_u16_f16(a: float16x8_t) -> uint16x8_t {
unsafe extern "unadjusted" {
@@ -9493,7 +9493,7 @@ pub fn vcvtxd_f32_f64(a: f64) -> f32 {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdiv_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
@@ -9503,7 +9503,7 @@ pub fn vdiv_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdivq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
@@ -10108,7 +10108,7 @@ pub fn vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfma_lane_f16<const LANE: i32>(
a: float16x4_t,
@@ -10124,7 +10124,7 @@ pub fn vfma_lane_f16<const LANE: i32>(
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfma_laneq_f16<const LANE: i32>(
a: float16x4_t,
@@ -10140,7 +10140,7 @@ pub fn vfma_laneq_f16<const LANE: i32>(
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmaq_lane_f16<const LANE: i32>(
a: float16x8_t,
@@ -10156,7 +10156,7 @@ pub fn vfmaq_lane_f16<const LANE: i32>(
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmaq_laneq_f16<const LANE: i32>(
a: float16x8_t,
@@ -10434,7 +10434,7 @@ pub fn vfmad_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal2))]
pub fn vfmlal_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
@@ -10452,7 +10452,7 @@ pub fn vfmlal_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float3
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal2))]
pub fn vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
@@ -10472,7 +10472,7 @@ pub fn vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlal_lane_high_f16<const LANE: i32>(
r: float32x2_t,
@@ -10489,7 +10489,7 @@ pub fn vfmlal_lane_high_f16<const LANE: i32>(
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlal_laneq_high_f16<const LANE: i32>(
r: float32x2_t,
@@ -10506,7 +10506,7 @@ pub fn vfmlal_laneq_high_f16<const LANE: i32>(
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlalq_lane_high_f16<const LANE: i32>(
r: float32x4_t,
@@ -10523,7 +10523,7 @@ pub fn vfmlalq_lane_high_f16<const LANE: i32>(
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlalq_laneq_high_f16<const LANE: i32>(
r: float32x4_t,
@@ -10540,7 +10540,7 @@ pub fn vfmlalq_laneq_high_f16<const LANE: i32>(
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlal_lane_low_f16<const LANE: i32>(
r: float32x2_t,
@@ -10557,7 +10557,7 @@ pub fn vfmlal_lane_low_f16<const LANE: i32>(
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlal_laneq_low_f16<const LANE: i32>(
r: float32x2_t,
@@ -10574,7 +10574,7 @@ pub fn vfmlal_laneq_low_f16<const LANE: i32>(
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlalq_lane_low_f16<const LANE: i32>(
r: float32x4_t,
@@ -10591,7 +10591,7 @@ pub fn vfmlalq_lane_low_f16<const LANE: i32>(
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlalq_laneq_low_f16<const LANE: i32>(
r: float32x4_t,
@@ -10606,7 +10606,7 @@ pub fn vfmlalq_laneq_low_f16<const LANE: i32>(
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal))]
pub fn vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
@@ -10624,7 +10624,7 @@ pub fn vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal))]
pub fn vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
@@ -10642,7 +10642,7 @@ pub fn vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float3
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl2))]
pub fn vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
@@ -10660,7 +10660,7 @@ pub fn vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float3
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl2))]
pub fn vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
@@ -10680,7 +10680,7 @@ pub fn vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_lane_high_f16<const LANE: i32>(
r: float32x2_t,
@@ -10697,7 +10697,7 @@ pub fn vfmlsl_lane_high_f16<const LANE: i32>(
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_laneq_high_f16<const LANE: i32>(
r: float32x2_t,
@@ -10714,7 +10714,7 @@ pub fn vfmlsl_laneq_high_f16<const LANE: i32>(
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_lane_high_f16<const LANE: i32>(
r: float32x4_t,
@@ -10731,7 +10731,7 @@ pub fn vfmlslq_lane_high_f16<const LANE: i32>(
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_laneq_high_f16<const LANE: i32>(
r: float32x4_t,
@@ -10748,7 +10748,7 @@ pub fn vfmlslq_laneq_high_f16<const LANE: i32>(
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_lane_low_f16<const LANE: i32>(
r: float32x2_t,
@@ -10765,7 +10765,7 @@ pub fn vfmlsl_lane_low_f16<const LANE: i32>(
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_laneq_low_f16<const LANE: i32>(
r: float32x2_t,
@@ -10782,7 +10782,7 @@ pub fn vfmlsl_laneq_low_f16<const LANE: i32>(
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_lane_low_f16<const LANE: i32>(
r: float32x4_t,
@@ -10799,7 +10799,7 @@ pub fn vfmlslq_lane_low_f16<const LANE: i32>(
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_laneq_low_f16<const LANE: i32>(
r: float32x4_t,
@@ -10814,7 +10814,7 @@ pub fn vfmlslq_laneq_low_f16<const LANE: i32>(
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl))]
pub fn vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
@@ -10832,7 +10832,7 @@ pub fn vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl))]
pub fn vfmlslq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
@@ -10863,7 +10863,7 @@ pub fn vfms_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfms_lane_f16<const LANE: i32>(
a: float16x4_t,
@@ -10879,7 +10879,7 @@ pub fn vfms_lane_f16<const LANE: i32>(
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfms_laneq_f16<const LANE: i32>(
a: float16x4_t,
@@ -10895,7 +10895,7 @@ pub fn vfms_laneq_f16<const LANE: i32>(
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsq_lane_f16<const LANE: i32>(
a: float16x8_t,
@@ -10911,7 +10911,7 @@ pub fn vfmsq_lane_f16<const LANE: i32>(
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsq_laneq_f16<const LANE: i32>(
a: float16x8_t,
@@ -15085,7 +15085,7 @@ pub fn vmul_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmul_laneq_f16<const LANE: i32>(a: float16x4_t, b: float16x8_t) -> float16x4_t {
static_assert_uimm_bits!(LANE, 3);
@@ -15102,7 +15102,7 @@ pub fn vmul_laneq_f16<const LANE: i32>(a: float16x4_t, b: float16x8_t) -> float1
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulq_laneq_f16<const LANE: i32>(a: float16x8_t, b: float16x8_t) -> float16x8_t {
static_assert_uimm_bits!(LANE, 3);
@@ -15609,7 +15609,7 @@ pub fn vmuld_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulx_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
@@ -15626,7 +15626,7 @@ pub fn vmulx_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
@@ -15709,7 +15709,7 @@ pub fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulx_lane_f16<const LANE: i32>(a: float16x4_t, b: float16x4_t) -> float16x4_t {
static_assert_uimm_bits!(LANE, 2);
@@ -15726,7 +15726,7 @@ pub fn vmulx_lane_f16<const LANE: i32>(a: float16x4_t, b: float16x4_t) -> float1
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulx_laneq_f16<const LANE: i32>(a: float16x4_t, b: float16x8_t) -> float16x4_t {
static_assert_uimm_bits!(LANE, 3);
@@ -15743,7 +15743,7 @@ pub fn vmulx_laneq_f16<const LANE: i32>(a: float16x4_t, b: float16x8_t) -> float
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxq_lane_f16<const LANE: i32>(a: float16x8_t, b: float16x4_t) -> float16x8_t {
static_assert_uimm_bits!(LANE, 2);
@@ -15773,7 +15773,7 @@ pub fn vmulxq_lane_f16<const LANE: i32>(a: float16x8_t, b: float16x4_t) -> float
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxq_laneq_f16<const LANE: i32>(a: float16x8_t, b: float16x8_t) -> float16x8_t {
static_assert_uimm_bits!(LANE, 3);
@@ -16135,7 +16135,7 @@ pub fn vpaddd_u64(a: uint64x2_t) -> u64 {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vpaddq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
@@ -16354,7 +16354,7 @@ pub fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
@@ -16371,7 +16371,7 @@ pub fn vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
@@ -16388,7 +16388,7 @@ pub fn vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
@@ -16405,7 +16405,7 @@ pub fn vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vpmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
@@ -16662,7 +16662,7 @@ pub fn vpmaxs_f32(a: float32x2_t) -> f32 {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
@@ -16679,7 +16679,7 @@ pub fn vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
@@ -16696,7 +16696,7 @@ pub fn vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnmp))]
pub fn vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
@@ -16713,7 +16713,7 @@ pub fn vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnmp))]
pub fn vpminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
@@ -21839,7 +21839,7 @@ pub fn vrecpxh_f16(a: f16) -> f16 {
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t {
@@ -21850,7 +21850,7 @@ pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t {
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t {
@@ -21862,7 +21862,7 @@ pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t {
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t {
@@ -21873,7 +21873,7 @@ pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t {
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t {
@@ -21888,7 +21888,7 @@ pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t {
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t {
@@ -21899,7 +21899,7 @@ pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t {
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t {
@@ -21913,7 +21913,7 @@ pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t {
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t {
@@ -21924,7 +21924,7 @@ pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t {
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t {
@@ -23503,7 +23503,7 @@ pub fn vrnd64z_f64(a: float64x1_t) -> float64x1_t {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrnd_f16(a: float16x4_t) -> float16x4_t {
@@ -23513,7 +23513,7 @@ pub fn vrnd_f16(a: float16x4_t) -> float16x4_t {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndq_f16(a: float16x8_t) -> float16x8_t {
@@ -23559,7 +23559,7 @@ pub fn vrndq_f64(a: float64x2_t) -> float64x2_t {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f16(a: float16x4_t) -> float16x4_t {
@@ -23569,7 +23569,7 @@ pub fn vrnda_f16(a: float16x4_t) -> float16x4_t {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f16(a: float16x8_t) -> float16x8_t {
@@ -23635,7 +23635,7 @@ pub fn vrndh_f16(a: f16) -> f16 {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f16(a: float16x4_t) -> float16x4_t {
@@ -23652,7 +23652,7 @@ pub fn vrndi_f16(a: float16x4_t) -> float16x4_t {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f16(a: float16x8_t) -> float16x8_t {
@@ -23750,7 +23750,7 @@ pub fn vrndih_f16(a: f16) -> f16 {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f16(a: float16x4_t) -> float16x4_t {
@@ -23760,7 +23760,7 @@ pub fn vrndm_f16(a: float16x4_t) -> float16x4_t {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f16(a: float16x8_t) -> float16x8_t {
@@ -23881,7 +23881,7 @@ pub fn vrndns_f32(a: f32) -> f32 {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f16(a: float16x4_t) -> float16x4_t {
@@ -23891,7 +23891,7 @@ pub fn vrndp_f16(a: float16x4_t) -> float16x4_t {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f16(a: float16x8_t) -> float16x8_t {
@@ -23947,7 +23947,7 @@ pub fn vrndph_f16(a: f16) -> f16 {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f16(a: float16x4_t) -> float16x4_t {
@@ -23957,7 +23957,7 @@ pub fn vrndx_f16(a: float16x4_t) -> float16x4_t {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f16(a: float16x8_t) -> float16x8_t {
@@ -25460,7 +25460,7 @@ pub fn vsqadds_u32(a: u32, b: i32) -> u32 {
#[inline(always)]
#[cfg_attr(test, assert_instr(fsqrt))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vsqrt_f16(a: float16x4_t) -> float16x4_t {
unsafe { simd_fsqrt(a) }
@@ -25470,7 +25470,7 @@ pub fn vsqrt_f16(a: float16x4_t) -> float16x4_t {
#[inline(always)]
#[cfg_attr(test, assert_instr(fsqrt))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vsqrtq_f16(a: float16x8_t) -> float16x8_t {
unsafe { simd_fsqrt(a) }
@@ -28031,7 +28031,7 @@ pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
@@ -28041,7 +28041,7 @@ pub fn vtrn1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
@@ -28267,7 +28267,7 @@ pub fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
@@ -28277,7 +28277,7 @@ pub fn vtrn2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
@@ -28777,7 +28777,7 @@ pub fn vuqadds_s32(a: i32, b: u32) -> i32 {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
@@ -28787,7 +28787,7 @@ pub fn vuzp1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
@@ -29013,7 +29013,7 @@ pub fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
@@ -29023,7 +29023,7 @@ pub fn vuzp2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
@@ -29267,7 +29267,7 @@ pub fn vxarq_u64<const IMM6: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
@@ -29277,7 +29277,7 @@ pub fn vzip1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
@@ -29503,7 +29503,7 @@ pub fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
@@ -29513,7 +29513,7 @@ pub fn vzip2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
@@ -821,7 +821,7 @@ pub fn vabaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -851,7 +851,7 @@ pub fn vabd_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -1422,7 +1422,7 @@ pub fn vabdl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -1444,7 +1444,7 @@ pub fn vabs_f16(a: float16x4_t) -> float16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -1673,7 +1673,7 @@ pub fn vabsh_f16(a: f16) -> f16 {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -1695,7 +1695,7 @@ pub fn vadd_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -3879,7 +3879,7 @@ pub fn vbicq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -3907,7 +3907,7 @@ pub fn vbsl_f16(a: uint16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -4529,7 +4529,7 @@ pub fn vbslq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -4559,7 +4559,7 @@ pub fn vcage_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -4647,7 +4647,7 @@ pub fn vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -4677,7 +4677,7 @@ pub fn vcagt_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -4765,7 +4765,7 @@ pub fn vcagtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -4787,7 +4787,7 @@ pub fn vcale_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -4851,7 +4851,7 @@ pub fn vcaleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -4873,7 +4873,7 @@ pub fn vcalt_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -4937,7 +4937,7 @@ pub fn vcaltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -4959,7 +4959,7 @@ pub fn vceq_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -5317,7 +5317,7 @@ pub fn vceqq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -5339,7 +5339,7 @@ pub fn vcge_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -5655,7 +5655,7 @@ pub fn vcgeq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -5678,7 +5678,7 @@ pub fn vcgez_f16(a: float16x4_t) -> uint16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -5701,7 +5701,7 @@ pub fn vcgezq_f16(a: float16x8_t) -> uint16x8_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -5723,7 +5723,7 @@ pub fn vcgt_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -6039,7 +6039,7 @@ pub fn vcgtq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -6062,7 +6062,7 @@ pub fn vcgtz_f16(a: float16x4_t) -> uint16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -6085,7 +6085,7 @@ pub fn vcgtzq_f16(a: float16x8_t) -> uint16x8_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -6107,7 +6107,7 @@ pub fn vcle_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -6423,7 +6423,7 @@ pub fn vcleq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -6446,7 +6446,7 @@ pub fn vclez_f16(a: float16x4_t) -> uint16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -6769,7 +6769,7 @@ pub fn vclsq_u32(a: uint32x4_t) -> int32x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -6791,7 +6791,7 @@ pub fn vclt_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -7107,7 +7107,7 @@ pub fn vcltq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -7130,7 +7130,7 @@ pub fn vcltz_f16(a: float16x4_t) -> uint16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -7812,7 +7812,7 @@ pub fn vcntq_p8(a: poly8x16_t) -> poly8x16_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -8041,7 +8041,7 @@ pub fn vcombine_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x2_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -8065,7 +8065,7 @@ pub fn vcreate_f16(a: u64) -> float16x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -8577,7 +8577,7 @@ pub fn vcreate_p64(a: u64) -> poly64x1_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -8599,7 +8599,7 @@ pub fn vcvt_f16_f32(a: float32x4_t) -> float16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -8621,7 +8621,7 @@ pub fn vcvt_f16_s16(a: int16x4_t) -> float16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -8643,7 +8643,7 @@ pub fn vcvtq_f16_s16(a: int16x8_t) -> float16x8_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -8665,7 +8665,7 @@ pub fn vcvt_f16_u16(a: uint16x4_t) -> float16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -8688,7 +8688,7 @@ pub fn vcvtq_f16_u16(a: uint16x8_t) -> float16x8_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -8795,7 +8795,7 @@ pub fn vcvtq_f32_u32(a: uint32x4_t) -> float32x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -8830,7 +8830,7 @@ pub fn vcvt_n_f16_s16<const N: i32>(a: int16x4_t) -> float16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -8865,7 +8865,7 @@ pub fn vcvtq_n_f16_s16<const N: i32>(a: int16x8_t) -> float16x8_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -8900,7 +8900,7 @@ pub fn vcvt_n_f16_u16<const N: i32>(a: uint16x4_t) -> float16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -9087,7 +9087,7 @@ pub fn vcvtq_n_f32_u32<const N: i32>(a: uint32x4_t) -> float32x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -9122,7 +9122,7 @@ pub fn vcvt_n_s16_f16<const N: i32>(a: float16x4_t) -> int16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -9233,7 +9233,7 @@ pub fn vcvtq_n_s32_f32<const N: i32>(a: float32x4_t) -> int32x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -9268,7 +9268,7 @@ pub fn vcvt_n_u16_f16<const N: i32>(a: float16x4_t) -> uint16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -9378,7 +9378,7 @@ pub fn vcvtq_n_u32_f32<const N: i32>(a: float32x4_t) -> uint32x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -9400,7 +9400,7 @@ pub fn vcvt_s16_f16(a: float16x4_t) -> int16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -9480,7 +9480,7 @@ pub fn vcvtq_s32_f32(a: float32x4_t) -> int32x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -9502,7 +9502,7 @@ pub fn vcvt_u16_f16(a: float16x4_t) -> uint16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -10140,7 +10140,7 @@ pub fn vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -10165,7 +10165,7 @@ pub fn vdup_lane_f16<const N: i32>(a: float16x4_t) -> float16x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -10719,7 +10719,7 @@ pub fn vdup_lane_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -10744,7 +10744,7 @@ pub fn vdup_laneq_f16<const N: i32>(a: float16x8_t) -> float16x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -12261,7 +12261,7 @@ pub fn veorq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -12640,7 +12640,7 @@ pub fn vextq_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -13228,7 +13228,7 @@ pub fn vextq_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -13250,7 +13250,7 @@ pub fn vfma_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -13357,7 +13357,7 @@ pub fn vfmaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -13383,7 +13383,7 @@ pub fn vfms_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -13494,7 +13494,7 @@ pub fn vfmsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -13513,7 +13513,7 @@ pub fn vget_high_f16(a: float16x8_t) -> float16x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -26747,7 +26747,7 @@ pub unsafe fn vldrq_p128(a: *const p128) -> p128 {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -26777,7 +26777,7 @@ pub fn vmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -27153,7 +27153,7 @@ pub fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -27175,7 +27175,7 @@ pub fn vmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -27239,7 +27239,7 @@ pub fn vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -27269,7 +27269,7 @@ pub fn vmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -27645,7 +27645,7 @@ pub fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -27667,7 +27667,7 @@ pub fn vminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -31603,7 +31603,7 @@ pub fn vmovn_u64(a: uint64x2_t) -> uint32x2_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -31625,7 +31625,7 @@ pub fn vmul_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -31690,7 +31690,7 @@ pub fn vmulq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -31719,7 +31719,7 @@ pub fn vmul_lane_f16<const LANE: i32>(a: float16x4_t, v: float16x4_t) -> float16
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -33691,7 +33691,7 @@ pub fn vmvnq_u8(a: uint8x16_t) -> uint8x16_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -33713,7 +33713,7 @@ pub fn vneg_f16(a: float16x4_t) -> float16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -34951,7 +34951,7 @@ pub fn vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41033,7 +41033,7 @@ pub fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41063,7 +41063,7 @@ pub fn vrecpe_f16(a: float16x4_t) -> float16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41209,7 +41209,7 @@ pub fn vrecpeq_u32(a: uint32x4_t) -> uint32x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41239,7 +41239,7 @@ pub fn vrecps_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41328,7 +41328,7 @@ pub fn vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41351,7 +41351,7 @@ pub fn vreinterpret_f32_f16(a: float16x4_t) -> float32x2_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41378,7 +41378,7 @@ pub fn vreinterpret_f32_f16(a: float16x4_t) -> float32x2_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41401,7 +41401,7 @@ pub fn vreinterpret_s8_f16(a: float16x4_t) -> int8x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41428,7 +41428,7 @@ pub fn vreinterpret_s8_f16(a: float16x4_t) -> int8x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41451,7 +41451,7 @@ pub fn vreinterpret_s16_f16(a: float16x4_t) -> int16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41478,7 +41478,7 @@ pub fn vreinterpret_s16_f16(a: float16x4_t) -> int16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41501,7 +41501,7 @@ pub fn vreinterpret_s32_f16(a: float16x4_t) -> int32x2_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41528,7 +41528,7 @@ pub fn vreinterpret_s32_f16(a: float16x4_t) -> int32x2_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41551,7 +41551,7 @@ pub fn vreinterpret_s64_f16(a: float16x4_t) -> int64x1_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41575,7 +41575,7 @@ pub fn vreinterpret_s64_f16(a: float16x4_t) -> int64x1_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41598,7 +41598,7 @@ pub fn vreinterpret_u8_f16(a: float16x4_t) -> uint8x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41625,7 +41625,7 @@ pub fn vreinterpret_u8_f16(a: float16x4_t) -> uint8x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41648,7 +41648,7 @@ pub fn vreinterpret_u16_f16(a: float16x4_t) -> uint16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41675,7 +41675,7 @@ pub fn vreinterpret_u16_f16(a: float16x4_t) -> uint16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41698,7 +41698,7 @@ pub fn vreinterpret_u32_f16(a: float16x4_t) -> uint32x2_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41725,7 +41725,7 @@ pub fn vreinterpret_u32_f16(a: float16x4_t) -> uint32x2_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41748,7 +41748,7 @@ pub fn vreinterpret_u64_f16(a: float16x4_t) -> uint64x1_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41772,7 +41772,7 @@ pub fn vreinterpret_u64_f16(a: float16x4_t) -> uint64x1_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41795,7 +41795,7 @@ pub fn vreinterpret_p8_f16(a: float16x4_t) -> poly8x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41822,7 +41822,7 @@ pub fn vreinterpret_p8_f16(a: float16x4_t) -> poly8x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41845,7 +41845,7 @@ pub fn vreinterpret_p16_f16(a: float16x4_t) -> poly16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41872,7 +41872,7 @@ pub fn vreinterpret_p16_f16(a: float16x4_t) -> poly16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41895,7 +41895,7 @@ pub fn vreinterpretq_f32_f16(a: float16x8_t) -> float32x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41922,7 +41922,7 @@ pub fn vreinterpretq_f32_f16(a: float16x8_t) -> float32x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41945,7 +41945,7 @@ pub fn vreinterpretq_s8_f16(a: float16x8_t) -> int8x16_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41976,7 +41976,7 @@ pub fn vreinterpretq_s8_f16(a: float16x8_t) -> int8x16_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -41999,7 +41999,7 @@ pub fn vreinterpretq_s16_f16(a: float16x8_t) -> int16x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42026,7 +42026,7 @@ pub fn vreinterpretq_s16_f16(a: float16x8_t) -> int16x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42049,7 +42049,7 @@ pub fn vreinterpretq_s32_f16(a: float16x8_t) -> int32x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42076,7 +42076,7 @@ pub fn vreinterpretq_s32_f16(a: float16x8_t) -> int32x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42099,7 +42099,7 @@ pub fn vreinterpretq_s64_f16(a: float16x8_t) -> int64x2_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42126,7 +42126,7 @@ pub fn vreinterpretq_s64_f16(a: float16x8_t) -> int64x2_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42149,7 +42149,7 @@ pub fn vreinterpretq_u8_f16(a: float16x8_t) -> uint8x16_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42180,7 +42180,7 @@ pub fn vreinterpretq_u8_f16(a: float16x8_t) -> uint8x16_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42203,7 +42203,7 @@ pub fn vreinterpretq_u16_f16(a: float16x8_t) -> uint16x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42230,7 +42230,7 @@ pub fn vreinterpretq_u16_f16(a: float16x8_t) -> uint16x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42253,7 +42253,7 @@ pub fn vreinterpretq_u32_f16(a: float16x8_t) -> uint32x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42280,7 +42280,7 @@ pub fn vreinterpretq_u32_f16(a: float16x8_t) -> uint32x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42303,7 +42303,7 @@ pub fn vreinterpretq_u64_f16(a: float16x8_t) -> uint64x2_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42330,7 +42330,7 @@ pub fn vreinterpretq_u64_f16(a: float16x8_t) -> uint64x2_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42353,7 +42353,7 @@ pub fn vreinterpretq_p8_f16(a: float16x8_t) -> poly8x16_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42384,7 +42384,7 @@ pub fn vreinterpretq_p8_f16(a: float16x8_t) -> poly8x16_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42407,7 +42407,7 @@ pub fn vreinterpretq_p16_f16(a: float16x8_t) -> poly16x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42434,7 +42434,7 @@ pub fn vreinterpretq_p16_f16(a: float16x8_t) -> poly16x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42457,7 +42457,7 @@ pub fn vreinterpret_f16_f32(a: float32x2_t) -> float16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42484,7 +42484,7 @@ pub fn vreinterpret_f16_f32(a: float32x2_t) -> float16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42507,7 +42507,7 @@ pub fn vreinterpretq_f16_f32(a: float32x4_t) -> float16x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42534,7 +42534,7 @@ pub fn vreinterpretq_f16_f32(a: float32x4_t) -> float16x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42557,7 +42557,7 @@ pub fn vreinterpret_f16_s8(a: int8x8_t) -> float16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42584,7 +42584,7 @@ pub fn vreinterpret_f16_s8(a: int8x8_t) -> float16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42607,7 +42607,7 @@ pub fn vreinterpretq_f16_s8(a: int8x16_t) -> float16x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42635,7 +42635,7 @@ pub fn vreinterpretq_f16_s8(a: int8x16_t) -> float16x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42658,7 +42658,7 @@ pub fn vreinterpret_f16_s16(a: int16x4_t) -> float16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42685,7 +42685,7 @@ pub fn vreinterpret_f16_s16(a: int16x4_t) -> float16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42708,7 +42708,7 @@ pub fn vreinterpretq_f16_s16(a: int16x8_t) -> float16x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42735,7 +42735,7 @@ pub fn vreinterpretq_f16_s16(a: int16x8_t) -> float16x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42758,7 +42758,7 @@ pub fn vreinterpret_f16_s32(a: int32x2_t) -> float16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42785,7 +42785,7 @@ pub fn vreinterpret_f16_s32(a: int32x2_t) -> float16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42808,7 +42808,7 @@ pub fn vreinterpretq_f16_s32(a: int32x4_t) -> float16x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42835,7 +42835,7 @@ pub fn vreinterpretq_f16_s32(a: int32x4_t) -> float16x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42858,7 +42858,7 @@ pub fn vreinterpret_f16_s64(a: int64x1_t) -> float16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42884,7 +42884,7 @@ pub fn vreinterpret_f16_s64(a: int64x1_t) -> float16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42907,7 +42907,7 @@ pub fn vreinterpretq_f16_s64(a: int64x2_t) -> float16x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42934,7 +42934,7 @@ pub fn vreinterpretq_f16_s64(a: int64x2_t) -> float16x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42957,7 +42957,7 @@ pub fn vreinterpret_f16_u8(a: uint8x8_t) -> float16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -42984,7 +42984,7 @@ pub fn vreinterpret_f16_u8(a: uint8x8_t) -> float16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43007,7 +43007,7 @@ pub fn vreinterpretq_f16_u8(a: uint8x16_t) -> float16x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43035,7 +43035,7 @@ pub fn vreinterpretq_f16_u8(a: uint8x16_t) -> float16x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43058,7 +43058,7 @@ pub fn vreinterpret_f16_u16(a: uint16x4_t) -> float16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43085,7 +43085,7 @@ pub fn vreinterpret_f16_u16(a: uint16x4_t) -> float16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43108,7 +43108,7 @@ pub fn vreinterpretq_f16_u16(a: uint16x8_t) -> float16x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43135,7 +43135,7 @@ pub fn vreinterpretq_f16_u16(a: uint16x8_t) -> float16x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43158,7 +43158,7 @@ pub fn vreinterpret_f16_u32(a: uint32x2_t) -> float16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43185,7 +43185,7 @@ pub fn vreinterpret_f16_u32(a: uint32x2_t) -> float16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43208,7 +43208,7 @@ pub fn vreinterpretq_f16_u32(a: uint32x4_t) -> float16x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43235,7 +43235,7 @@ pub fn vreinterpretq_f16_u32(a: uint32x4_t) -> float16x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43258,7 +43258,7 @@ pub fn vreinterpret_f16_u64(a: uint64x1_t) -> float16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43284,7 +43284,7 @@ pub fn vreinterpret_f16_u64(a: uint64x1_t) -> float16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43307,7 +43307,7 @@ pub fn vreinterpretq_f16_u64(a: uint64x2_t) -> float16x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43334,7 +43334,7 @@ pub fn vreinterpretq_f16_u64(a: uint64x2_t) -> float16x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43357,7 +43357,7 @@ pub fn vreinterpret_f16_p8(a: poly8x8_t) -> float16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43384,7 +43384,7 @@ pub fn vreinterpret_f16_p8(a: poly8x8_t) -> float16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43407,7 +43407,7 @@ pub fn vreinterpretq_f16_p8(a: poly8x16_t) -> float16x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43435,7 +43435,7 @@ pub fn vreinterpretq_f16_p8(a: poly8x16_t) -> float16x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43458,7 +43458,7 @@ pub fn vreinterpret_f16_p16(a: poly16x4_t) -> float16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43485,7 +43485,7 @@ pub fn vreinterpret_f16_p16(a: poly16x4_t) -> float16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43508,7 +43508,7 @@ pub fn vreinterpretq_f16_p16(a: poly16x8_t) -> float16x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43535,7 +43535,7 @@ pub fn vreinterpretq_f16_p16(a: poly16x8_t) -> float16x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43558,7 +43558,7 @@ pub fn vreinterpretq_f16_p128(a: p128) -> float16x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43584,7 +43584,7 @@ pub fn vreinterpretq_f16_p128(a: p128) -> float16x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43607,7 +43607,7 @@ pub fn vreinterpret_p64_f16(a: float16x4_t) -> poly64x1_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43631,7 +43631,7 @@ pub fn vreinterpret_p64_f16(a: float16x4_t) -> poly64x1_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43654,7 +43654,7 @@ pub fn vreinterpretq_p128_f16(a: float16x8_t) -> p128 {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43678,7 +43678,7 @@ pub fn vreinterpretq_p128_f16(a: float16x8_t) -> p128 {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43701,7 +43701,7 @@ pub fn vreinterpretq_p64_f16(a: float16x8_t) -> poly64x2_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43728,7 +43728,7 @@ pub fn vreinterpretq_p64_f16(a: float16x8_t) -> poly64x2_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43751,7 +43751,7 @@ pub fn vreinterpret_f16_p64(a: poly64x1_t) -> float16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43777,7 +43777,7 @@ pub fn vreinterpret_f16_p64(a: poly64x1_t) -> float16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -43800,7 +43800,7 @@ pub fn vreinterpretq_f16_p64(a: poly64x2_t) -> float16x8_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -57804,7 +57804,7 @@ pub fn vrev64q_u8(a: uint8x16_t) -> uint8x16_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -57826,7 +57826,7 @@ pub fn vrev64_f16(a: float16x4_t) -> float16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -58196,7 +58196,7 @@ pub fn vrhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -58225,7 +58225,7 @@ pub fn vrndn_f16(a: float16x4_t) -> float16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -59316,7 +59316,7 @@ pub fn vrshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -59346,7 +59346,7 @@ pub fn vrsqrte_f16(a: float16x4_t) -> float16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -59492,7 +59492,7 @@ pub fn vrsqrteq_u32(a: uint32x4_t) -> uint32x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -59522,7 +59522,7 @@ pub fn vrsqrts_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -71076,7 +71076,7 @@ pub unsafe fn vstrq_p128(a: *mut p128, b: p128) {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -71098,7 +71098,7 @@ pub fn vsub_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -73079,7 +73079,7 @@ pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -73109,7 +73109,7 @@ pub fn vtrn_f16(a: float16x4_t, b: float16x4_t) -> float16x4x2_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -74392,7 +74392,7 @@ pub fn vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -74422,7 +74422,7 @@ pub fn vuzp_f16(a: float16x4_t, b: float16x4_t) -> float16x4x2_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -74998,7 +74998,7 @@ pub fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -75028,7 +75028,7 @@ pub fn vzip_f16(a: float16x4_t, b: float16x4_t) -> float16x4x2_t {
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -104,7 +104,7 @@ fn as_signed(self) -> $signed {
}
types! {
#![cfg_attr(not(target_arch = "arm"), stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION"))]
#![cfg_attr(not(target_arch = "arm"), stable(feature = "stdarch_neon_fp16", since = "1.94.0"))]
#![cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))]
/// Arm-specific 64-bit wide vector of four packed `f16`.
@@ -750,7 +750,7 @@ pub struct uint32x4x4_t(
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -763,7 +763,7 @@ pub struct uint32x4x4_t(
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -776,7 +776,7 @@ pub struct uint32x4x4_t(
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -794,7 +794,7 @@ pub struct float16x4x4_t(
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -807,7 +807,7 @@ pub struct float16x4x4_t(
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -820,7 +820,7 @@ pub struct float16x4x4_t(
#[derive(Copy, Clone, Debug)]
#[cfg_attr(
not(target_arch = "arm"),
stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")
stable(feature = "stdarch_neon_fp16", since = "1.94.0")
)]
#[cfg_attr(
target_arch = "arm",
@@ -247,7 +247,7 @@ pub const fn _mm512_setr_ph(
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_setzero_ph)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_setzero_ph() -> __m128h {
unsafe { transmute(f16x8::ZERO) }
@@ -258,7 +258,7 @@ pub const fn _mm_setzero_ph() -> __m128h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_setzero_ph)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_setzero_ph() -> __m256h {
f16x16::ZERO.as_m256h()
@@ -269,7 +269,7 @@ pub const fn _mm256_setzero_ph() -> __m256h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_setzero_ph)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_setzero_ph() -> __m512h {
f16x32::ZERO.as_m512h()
@@ -283,7 +283,7 @@ pub const fn _mm512_setzero_ph() -> __m512h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_undefined_ph)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_undefined_ph() -> __m128h {
f16x8::ZERO.as_m128h()
@@ -297,7 +297,7 @@ pub const fn _mm_undefined_ph() -> __m128h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_undefined_ph)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_undefined_ph() -> __m256h {
f16x16::ZERO.as_m256h()
@@ -311,7 +311,7 @@ pub const fn _mm256_undefined_ph() -> __m256h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_undefined_ph)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_undefined_ph() -> __m512h {
f16x32::ZERO.as_m512h()
@@ -323,7 +323,7 @@ pub const fn _mm512_undefined_ph() -> __m512h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castpd_ph)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_castpd_ph(a: __m128d) -> __m128h {
unsafe { transmute(a) }
@@ -335,7 +335,7 @@ pub const fn _mm_castpd_ph(a: __m128d) -> __m128h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castpd_ph)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_castpd_ph(a: __m256d) -> __m256h {
unsafe { transmute(a) }
@@ -347,7 +347,7 @@ pub const fn _mm256_castpd_ph(a: __m256d) -> __m256h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_castpd_ph)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_castpd_ph(a: __m512d) -> __m512h {
unsafe { transmute(a) }
@@ -359,7 +359,7 @@ pub const fn _mm512_castpd_ph(a: __m512d) -> __m512h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castph_pd)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_castph_pd(a: __m128h) -> __m128d {
unsafe { transmute(a) }
@@ -371,7 +371,7 @@ pub const fn _mm_castph_pd(a: __m128h) -> __m128d {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castph_pd)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_castph_pd(a: __m256h) -> __m256d {
unsafe { transmute(a) }
@@ -383,7 +383,7 @@ pub const fn _mm256_castph_pd(a: __m256h) -> __m256d {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_castph_pd)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_castph_pd(a: __m512h) -> __m512d {
unsafe { transmute(a) }
@@ -395,7 +395,7 @@ pub const fn _mm512_castph_pd(a: __m512h) -> __m512d {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castps_ph)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_castps_ph(a: __m128) -> __m128h {
unsafe { transmute(a) }
@@ -407,7 +407,7 @@ pub const fn _mm_castps_ph(a: __m128) -> __m128h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castps_ph)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_castps_ph(a: __m256) -> __m256h {
unsafe { transmute(a) }
@@ -419,7 +419,7 @@ pub const fn _mm256_castps_ph(a: __m256) -> __m256h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_castps_ph)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_castps_ph(a: __m512) -> __m512h {
unsafe { transmute(a) }
@@ -431,7 +431,7 @@ pub const fn _mm512_castps_ph(a: __m512) -> __m512h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castph_ps)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_castph_ps(a: __m128h) -> __m128 {
unsafe { transmute(a) }
@@ -443,7 +443,7 @@ pub const fn _mm_castph_ps(a: __m128h) -> __m128 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castph_ps)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_castph_ps(a: __m256h) -> __m256 {
unsafe { transmute(a) }
@@ -455,7 +455,7 @@ pub const fn _mm256_castph_ps(a: __m256h) -> __m256 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_castph_ps)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_castph_ps(a: __m512h) -> __m512 {
unsafe { transmute(a) }
@@ -467,7 +467,7 @@ pub const fn _mm512_castph_ps(a: __m512h) -> __m512 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castsi128_ph)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_castsi128_ph(a: __m128i) -> __m128h {
unsafe { transmute(a) }
@@ -479,7 +479,7 @@ pub const fn _mm_castsi128_ph(a: __m128i) -> __m128h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castsi256_ph)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_castsi256_ph(a: __m256i) -> __m256h {
unsafe { transmute(a) }
@@ -491,7 +491,7 @@ pub const fn _mm256_castsi256_ph(a: __m256i) -> __m256h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_castsi512_ph)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_castsi512_ph(a: __m512i) -> __m512h {
unsafe { transmute(a) }
@@ -503,7 +503,7 @@ pub const fn _mm512_castsi512_ph(a: __m512i) -> __m512h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castph_si128)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_castph_si128(a: __m128h) -> __m128i {
unsafe { transmute(a) }
@@ -515,7 +515,7 @@ pub const fn _mm_castph_si128(a: __m128h) -> __m128i {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castph_si256)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_castph_si256(a: __m256h) -> __m256i {
unsafe { transmute(a) }
@@ -527,7 +527,7 @@ pub const fn _mm256_castph_si256(a: __m256h) -> __m256i {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_castph_si512)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_castph_si512(a: __m512h) -> __m512i {
unsafe { transmute(a) }
@@ -539,7 +539,7 @@ pub const fn _mm512_castph_si512(a: __m512h) -> __m512i {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castph256_ph128)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_castph256_ph128(a: __m256h) -> __m128h {
unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) }
@@ -551,7 +551,7 @@ pub const fn _mm256_castph256_ph128(a: __m256h) -> __m128h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_castph512_ph128)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_castph512_ph128(a: __m512h) -> __m128h {
unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) }
@@ -563,7 +563,7 @@ pub const fn _mm512_castph512_ph128(a: __m512h) -> __m128h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_castph512_ph256)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_castph512_ph256(a: __m512h) -> __m256h {
unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
@@ -576,7 +576,7 @@ pub const fn _mm512_castph512_ph256(a: __m512h) -> __m256h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castph128_ph256)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_castph128_ph256(a: __m128h) -> __m256h {
unsafe {
@@ -595,7 +595,7 @@ pub const fn _mm256_castph128_ph256(a: __m128h) -> __m256h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_castph128_ph512)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_castph128_ph512(a: __m128h) -> __m512h {
unsafe {
@@ -617,7 +617,7 @@ pub const fn _mm512_castph128_ph512(a: __m128h) -> __m512h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_castph256_ph512)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_castph256_ph512(a: __m256h) -> __m512h {
unsafe {
@@ -639,7 +639,7 @@ pub const fn _mm512_castph256_ph512(a: __m256h) -> __m512h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_zextph128_ph256)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_zextph128_ph256(a: __m128h) -> __m256h {
unsafe {
@@ -658,7 +658,7 @@ pub const fn _mm256_zextph128_ph256(a: __m128h) -> __m256h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_zextph256_ph512)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_zextph256_ph512(a: __m256h) -> __m512h {
unsafe {
@@ -680,7 +680,7 @@ pub const fn _mm512_zextph256_ph512(a: __m256h) -> __m512h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_zextph128_ph512)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_zextph128_ph512(a: __m128h) -> __m512h {
unsafe {
@@ -730,7 +730,7 @@ macro_rules! cmp_asm { // FIXME: use LLVM intrinsics
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cmp_ph_mask<const IMM5: i32>(a: __m128h, b: __m128h) -> __mmask8 {
unsafe {
static_assert_uimm_bits!(IMM5, 5);
@@ -746,7 +746,7 @@ pub fn _mm_cmp_ph_mask<const IMM5: i32>(a: __m128h, b: __m128h) -> __mmask8 {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cmp_ph_mask<const IMM5: i32>(k1: __mmask8, a: __m128h, b: __m128h) -> __mmask8 {
unsafe {
static_assert_uimm_bits!(IMM5, 5);
@@ -761,7 +761,7 @@ pub fn _mm_mask_cmp_ph_mask<const IMM5: i32>(k1: __mmask8, a: __m128h, b: __m128
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_cmp_ph_mask<const IMM5: i32>(a: __m256h, b: __m256h) -> __mmask16 {
unsafe {
static_assert_uimm_bits!(IMM5, 5);
@@ -777,7 +777,7 @@ pub fn _mm256_cmp_ph_mask<const IMM5: i32>(a: __m256h, b: __m256h) -> __mmask16
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_cmp_ph_mask<const IMM5: i32>(
k1: __mmask16,
a: __m256h,
@@ -796,7 +796,7 @@ pub fn _mm256_mask_cmp_ph_mask<const IMM5: i32>(
#[inline]
#[target_feature(enable = "avx512fp16")]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cmp_ph_mask<const IMM5: i32>(a: __m512h, b: __m512h) -> __mmask32 {
unsafe {
static_assert_uimm_bits!(IMM5, 5);
@@ -812,7 +812,7 @@ pub fn _mm512_cmp_ph_mask<const IMM5: i32>(a: __m512h, b: __m512h) -> __mmask32
#[inline]
#[target_feature(enable = "avx512fp16")]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cmp_ph_mask<const IMM5: i32>(
k1: __mmask32,
a: __m512h,
@@ -833,7 +833,7 @@ pub fn _mm512_mask_cmp_ph_mask<const IMM5: i32>(
#[inline]
#[target_feature(enable = "avx512fp16")]
#[rustc_legacy_const_generics(2, 3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cmp_round_ph_mask<const IMM5: i32, const SAE: i32>(
a: __m512h,
b: __m512h,
@@ -868,7 +868,7 @@ pub fn _mm512_cmp_round_ph_mask<const IMM5: i32, const SAE: i32>(
#[inline]
#[target_feature(enable = "avx512fp16")]
#[rustc_legacy_const_generics(3, 4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cmp_round_ph_mask<const IMM5: i32, const SAE: i32>(
k1: __mmask32,
a: __m512h,
@@ -903,7 +903,7 @@ pub fn _mm512_mask_cmp_round_ph_mask<const IMM5: i32, const SAE: i32>(
#[inline]
#[target_feature(enable = "avx512fp16")]
#[rustc_legacy_const_generics(2, 3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cmp_round_sh_mask<const IMM5: i32, const SAE: i32>(a: __m128h, b: __m128h) -> __mmask8 {
static_assert_uimm_bits!(IMM5, 5);
static_assert_sae!(SAE);
@@ -918,7 +918,7 @@ pub fn _mm_cmp_round_sh_mask<const IMM5: i32, const SAE: i32>(a: __m128h, b: __m
#[inline]
#[target_feature(enable = "avx512fp16")]
#[rustc_legacy_const_generics(3, 4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cmp_round_sh_mask<const IMM5: i32, const SAE: i32>(
k1: __mmask8,
a: __m128h,
@@ -938,7 +938,7 @@ pub fn _mm_mask_cmp_round_sh_mask<const IMM5: i32, const SAE: i32>(
#[inline]
#[target_feature(enable = "avx512fp16")]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cmp_sh_mask<const IMM5: i32>(a: __m128h, b: __m128h) -> __mmask8 {
static_assert_uimm_bits!(IMM5, 5);
_mm_cmp_round_sh_mask::<IMM5, _MM_FROUND_CUR_DIRECTION>(a, b)
@@ -951,7 +951,7 @@ pub fn _mm_cmp_sh_mask<const IMM5: i32>(a: __m128h, b: __m128h) -> __mmask8 {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cmp_sh_mask<const IMM5: i32>(k1: __mmask8, a: __m128h, b: __m128h) -> __mmask8 {
static_assert_uimm_bits!(IMM5, 5);
_mm_mask_cmp_round_sh_mask::<IMM5, _MM_FROUND_CUR_DIRECTION>(k1, a, b)
@@ -965,7 +965,7 @@ pub fn _mm_mask_cmp_sh_mask<const IMM5: i32>(k1: __mmask8, a: __m128h, b: __m128
#[inline]
#[target_feature(enable = "avx512fp16")]
#[rustc_legacy_const_generics(2, 3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_comi_round_sh<const IMM5: i32, const SAE: i32>(a: __m128h, b: __m128h) -> i32 {
unsafe {
static_assert_uimm_bits!(IMM5, 5);
@@ -981,7 +981,7 @@ pub fn _mm_comi_round_sh<const IMM5: i32, const SAE: i32>(a: __m128h, b: __m128h
#[inline]
#[target_feature(enable = "avx512fp16")]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_comi_sh<const IMM5: i32>(a: __m128h, b: __m128h) -> i32 {
static_assert_uimm_bits!(IMM5, 5);
_mm_comi_round_sh::<IMM5, _MM_FROUND_CUR_DIRECTION>(a, b)
@@ -993,7 +993,7 @@ pub fn _mm_comi_sh<const IMM5: i32>(a: __m128h, b: __m128h) -> i32 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comieq_sh)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_comieq_sh(a: __m128h, b: __m128h) -> i32 {
_mm_comi_sh::<_CMP_EQ_OS>(a, b)
}
@@ -1004,7 +1004,7 @@ pub fn _mm_comieq_sh(a: __m128h, b: __m128h) -> i32 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comige_sh)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_comige_sh(a: __m128h, b: __m128h) -> i32 {
_mm_comi_sh::<_CMP_GE_OS>(a, b)
}
@@ -1015,7 +1015,7 @@ pub fn _mm_comige_sh(a: __m128h, b: __m128h) -> i32 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comigt_sh)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_comigt_sh(a: __m128h, b: __m128h) -> i32 {
_mm_comi_sh::<_CMP_GT_OS>(a, b)
}
@@ -1026,7 +1026,7 @@ pub fn _mm_comigt_sh(a: __m128h, b: __m128h) -> i32 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comile_sh)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_comile_sh(a: __m128h, b: __m128h) -> i32 {
_mm_comi_sh::<_CMP_LE_OS>(a, b)
}
@@ -1037,7 +1037,7 @@ pub fn _mm_comile_sh(a: __m128h, b: __m128h) -> i32 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comilt_sh)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_comilt_sh(a: __m128h, b: __m128h) -> i32 {
_mm_comi_sh::<_CMP_LT_OS>(a, b)
}
@@ -1048,7 +1048,7 @@ pub fn _mm_comilt_sh(a: __m128h, b: __m128h) -> i32 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comineq_sh)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_comineq_sh(a: __m128h, b: __m128h) -> i32 {
_mm_comi_sh::<_CMP_NEQ_US>(a, b)
}
@@ -1059,7 +1059,7 @@ pub fn _mm_comineq_sh(a: __m128h, b: __m128h) -> i32 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ucomieq_sh)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_ucomieq_sh(a: __m128h, b: __m128h) -> i32 {
_mm_comi_sh::<_CMP_EQ_OQ>(a, b)
}
@@ -1070,7 +1070,7 @@ pub fn _mm_ucomieq_sh(a: __m128h, b: __m128h) -> i32 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ucomige_sh)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_ucomige_sh(a: __m128h, b: __m128h) -> i32 {
_mm_comi_sh::<_CMP_GE_OQ>(a, b)
}
@@ -1081,7 +1081,7 @@ pub fn _mm_ucomige_sh(a: __m128h, b: __m128h) -> i32 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ucomigt_sh)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_ucomigt_sh(a: __m128h, b: __m128h) -> i32 {
_mm_comi_sh::<_CMP_GT_OQ>(a, b)
}
@@ -1092,7 +1092,7 @@ pub fn _mm_ucomigt_sh(a: __m128h, b: __m128h) -> i32 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ucomile_sh)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_ucomile_sh(a: __m128h, b: __m128h) -> i32 {
_mm_comi_sh::<_CMP_LE_OQ>(a, b)
}
@@ -1103,7 +1103,7 @@ pub fn _mm_ucomile_sh(a: __m128h, b: __m128h) -> i32 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ucomilt_sh)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_ucomilt_sh(a: __m128h, b: __m128h) -> i32 {
_mm_comi_sh::<_CMP_LT_OQ>(a, b)
}
@@ -1114,7 +1114,7 @@ pub fn _mm_ucomilt_sh(a: __m128h, b: __m128h) -> i32 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ucomineq_sh)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_ucomineq_sh(a: __m128h, b: __m128h) -> i32 {
_mm_comi_sh::<_CMP_NEQ_UQ>(a, b)
}
@@ -1248,7 +1248,7 @@ pub unsafe fn _mm_maskz_load_sh(k: __mmask8, mem_addr: *const f16) -> __m128h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_move_sh)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask_move_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
unsafe {
@@ -1267,7 +1267,7 @@ pub const fn _mm_mask_move_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h)
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_move_sh)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_maskz_move_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
unsafe {
@@ -1285,7 +1285,7 @@ pub const fn _mm_maskz_move_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_move_sh)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_move_sh(a: __m128h, b: __m128h) -> __m128h {
unsafe {
@@ -1399,7 +1399,7 @@ pub unsafe fn _mm_mask_store_sh(mem_addr: *mut f16, k: __mmask8, a: __m128h) {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vaddph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_add_ph(a: __m128h, b: __m128h) -> __m128h {
unsafe { simd_add(a, b) }
@@ -1412,7 +1412,7 @@ pub const fn _mm_add_ph(a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vaddph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask_add_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
unsafe {
@@ -1428,7 +1428,7 @@ pub const fn _mm_mask_add_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vaddph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_maskz_add_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
unsafe {
@@ -1443,7 +1443,7 @@ pub const fn _mm_maskz_add_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vaddph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_add_ph(a: __m256h, b: __m256h) -> __m256h {
unsafe { simd_add(a, b) }
@@ -1456,7 +1456,7 @@ pub const fn _mm256_add_ph(a: __m256h, b: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vaddph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_mask_add_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m256h) -> __m256h {
unsafe {
@@ -1472,7 +1472,7 @@ pub const fn _mm256_mask_add_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m25
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vaddph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_maskz_add_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h {
unsafe {
@@ -1487,7 +1487,7 @@ pub const fn _mm256_maskz_add_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vaddph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_add_ph(a: __m512h, b: __m512h) -> __m512h {
unsafe { simd_add(a, b) }
@@ -1500,7 +1500,7 @@ pub const fn _mm512_add_ph(a: __m512h, b: __m512h) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vaddph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_mask_add_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
unsafe {
@@ -1516,7 +1516,7 @@ pub const fn _mm512_mask_add_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m51
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vaddph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_maskz_add_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
unsafe {
@@ -1539,7 +1539,7 @@ pub const fn _mm512_maskz_add_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vaddph, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_add_round_ph<const ROUNDING: i32>(a: __m512h, b: __m512h) -> __m512h {
unsafe {
static_assert_rounding!(ROUNDING);
@@ -1562,7 +1562,7 @@ pub fn _mm512_add_round_ph<const ROUNDING: i32>(a: __m512h, b: __m512h) -> __m51
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vaddph, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_add_round_ph<const ROUNDING: i32>(
src: __m512h,
k: __mmask32,
@@ -1590,7 +1590,7 @@ pub fn _mm512_mask_add_round_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vaddph, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_add_round_ph<const ROUNDING: i32>(
k: __mmask32,
a: __m512h,
@@ -1618,7 +1618,7 @@ pub fn _mm512_maskz_add_round_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vaddsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_add_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m128h {
static_assert_rounding!(ROUNDING);
_mm_mask_add_round_sh::<ROUNDING>(f16x8::ZERO.as_m128h(), 0xff, a, b)
@@ -1640,7 +1640,7 @@ pub fn _mm_add_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m128h
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vaddsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_add_round_sh<const ROUNDING: i32>(
src: __m128h,
k: __mmask8,
@@ -1669,7 +1669,7 @@ pub fn _mm_mask_add_round_sh<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vaddsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_add_round_sh<const ROUNDING: i32>(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
static_assert_rounding!(ROUNDING);
_mm_mask_add_round_sh::<ROUNDING>(f16x8::ZERO.as_m128h(), k, a, b)
@@ -1682,7 +1682,7 @@ pub fn _mm_maskz_add_round_sh<const ROUNDING: i32>(k: __mmask8, a: __m128h, b: _
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vaddsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_add_sh(a: __m128h, b: __m128h) -> __m128h {
unsafe { simd_insert!(a, 0, _mm_cvtsh_h(a) + _mm_cvtsh_h(b)) }
@@ -1696,7 +1696,7 @@ pub const fn _mm_add_sh(a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vaddsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask_add_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
unsafe {
@@ -1719,7 +1719,7 @@ pub const fn _mm_mask_add_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vaddsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_maskz_add_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
unsafe {
@@ -1739,7 +1739,7 @@ pub const fn _mm_maskz_add_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vsubph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_sub_ph(a: __m128h, b: __m128h) -> __m128h {
unsafe { simd_sub(a, b) }
@@ -1752,7 +1752,7 @@ pub const fn _mm_sub_ph(a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vsubph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask_sub_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
unsafe {
@@ -1768,7 +1768,7 @@ pub const fn _mm_mask_sub_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vsubph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_maskz_sub_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
unsafe {
@@ -1783,7 +1783,7 @@ pub const fn _mm_maskz_sub_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vsubph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_sub_ph(a: __m256h, b: __m256h) -> __m256h {
unsafe { simd_sub(a, b) }
@@ -1796,7 +1796,7 @@ pub const fn _mm256_sub_ph(a: __m256h, b: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vsubph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_mask_sub_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m256h) -> __m256h {
unsafe {
@@ -1812,7 +1812,7 @@ pub const fn _mm256_mask_sub_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m25
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vsubph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_maskz_sub_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h {
unsafe {
@@ -1827,7 +1827,7 @@ pub const fn _mm256_maskz_sub_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vsubph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_sub_ph(a: __m512h, b: __m512h) -> __m512h {
unsafe { simd_sub(a, b) }
@@ -1840,7 +1840,7 @@ pub const fn _mm512_sub_ph(a: __m512h, b: __m512h) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vsubph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_mask_sub_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
unsafe {
@@ -1856,7 +1856,7 @@ pub const fn _mm512_mask_sub_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m51
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vsubph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_maskz_sub_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
unsafe {
@@ -1879,7 +1879,7 @@ pub const fn _mm512_maskz_sub_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vsubph, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_sub_round_ph<const ROUNDING: i32>(a: __m512h, b: __m512h) -> __m512h {
unsafe {
static_assert_rounding!(ROUNDING);
@@ -1902,7 +1902,7 @@ pub fn _mm512_sub_round_ph<const ROUNDING: i32>(a: __m512h, b: __m512h) -> __m51
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vsubph, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_sub_round_ph<const ROUNDING: i32>(
src: __m512h,
k: __mmask32,
@@ -1931,7 +1931,7 @@ pub fn _mm512_mask_sub_round_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vsubph, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_sub_round_ph<const ROUNDING: i32>(
k: __mmask32,
a: __m512h,
@@ -1959,7 +1959,7 @@ pub fn _mm512_maskz_sub_round_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vsubsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_sub_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m128h {
static_assert_rounding!(ROUNDING);
_mm_mask_sub_round_sh::<ROUNDING>(f16x8::ZERO.as_m128h(), 0xff, a, b)
@@ -1981,7 +1981,7 @@ pub fn _mm_sub_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m128h
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vsubsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_sub_round_sh<const ROUNDING: i32>(
src: __m128h,
k: __mmask8,
@@ -2010,7 +2010,7 @@ pub fn _mm_mask_sub_round_sh<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vsubsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_sub_round_sh<const ROUNDING: i32>(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
static_assert_rounding!(ROUNDING);
_mm_mask_sub_round_sh::<ROUNDING>(f16x8::ZERO.as_m128h(), k, a, b)
@@ -2023,7 +2023,7 @@ pub fn _mm_maskz_sub_round_sh<const ROUNDING: i32>(k: __mmask8, a: __m128h, b: _
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vsubsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_sub_sh(a: __m128h, b: __m128h) -> __m128h {
unsafe { simd_insert!(a, 0, _mm_cvtsh_h(a) - _mm_cvtsh_h(b)) }
@@ -2037,7 +2037,7 @@ pub const fn _mm_sub_sh(a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vsubsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask_sub_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
unsafe {
@@ -2060,7 +2060,7 @@ pub const fn _mm_mask_sub_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vsubsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_maskz_sub_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
unsafe {
@@ -2080,7 +2080,7 @@ pub const fn _mm_maskz_sub_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vmulph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mul_ph(a: __m128h, b: __m128h) -> __m128h {
unsafe { simd_mul(a, b) }
@@ -2093,7 +2093,7 @@ pub const fn _mm_mul_ph(a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vmulph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask_mul_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
unsafe {
@@ -2109,7 +2109,7 @@ pub const fn _mm_mask_mul_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vmulph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_maskz_mul_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
unsafe {
@@ -2124,7 +2124,7 @@ pub const fn _mm_maskz_mul_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vmulph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_mul_ph(a: __m256h, b: __m256h) -> __m256h {
unsafe { simd_mul(a, b) }
@@ -2137,7 +2137,7 @@ pub const fn _mm256_mul_ph(a: __m256h, b: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vmulph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_mask_mul_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m256h) -> __m256h {
unsafe {
@@ -2153,7 +2153,7 @@ pub const fn _mm256_mask_mul_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m25
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vmulph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_maskz_mul_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h {
unsafe {
@@ -2168,7 +2168,7 @@ pub const fn _mm256_maskz_mul_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vmulph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_mul_ph(a: __m512h, b: __m512h) -> __m512h {
unsafe { simd_mul(a, b) }
@@ -2181,7 +2181,7 @@ pub const fn _mm512_mul_ph(a: __m512h, b: __m512h) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vmulph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_mask_mul_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
unsafe {
@@ -2197,7 +2197,7 @@ pub const fn _mm512_mask_mul_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m51
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vmulph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_maskz_mul_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
unsafe {
@@ -2220,7 +2220,7 @@ pub const fn _mm512_maskz_mul_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vmulph, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mul_round_ph<const ROUNDING: i32>(a: __m512h, b: __m512h) -> __m512h {
unsafe {
static_assert_rounding!(ROUNDING);
@@ -2243,7 +2243,7 @@ pub fn _mm512_mul_round_ph<const ROUNDING: i32>(a: __m512h, b: __m512h) -> __m51
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vmulph, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_mul_round_ph<const ROUNDING: i32>(
src: __m512h,
k: __mmask32,
@@ -2272,7 +2272,7 @@ pub fn _mm512_mask_mul_round_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vmulph, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_mul_round_ph<const ROUNDING: i32>(
k: __mmask32,
a: __m512h,
@@ -2300,7 +2300,7 @@ pub fn _mm512_maskz_mul_round_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vmulsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mul_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m128h {
static_assert_rounding!(ROUNDING);
_mm_mask_mul_round_sh::<ROUNDING>(f16x8::ZERO.as_m128h(), 0xff, a, b)
@@ -2322,7 +2322,7 @@ pub fn _mm_mul_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m128h
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vmulsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_mul_round_sh<const ROUNDING: i32>(
src: __m128h,
k: __mmask8,
@@ -2351,7 +2351,7 @@ pub fn _mm_mask_mul_round_sh<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vmulsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_mul_round_sh<const ROUNDING: i32>(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
static_assert_rounding!(ROUNDING);
_mm_mask_mul_round_sh::<ROUNDING>(f16x8::ZERO.as_m128h(), k, a, b)
@@ -2364,7 +2364,7 @@ pub fn _mm_maskz_mul_round_sh<const ROUNDING: i32>(k: __mmask8, a: __m128h, b: _
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vmulsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mul_sh(a: __m128h, b: __m128h) -> __m128h {
unsafe { simd_insert!(a, 0, _mm_cvtsh_h(a) * _mm_cvtsh_h(b)) }
@@ -2378,7 +2378,7 @@ pub const fn _mm_mul_sh(a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vmulsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask_mul_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
unsafe {
@@ -2401,7 +2401,7 @@ pub const fn _mm_mask_mul_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vmulsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_maskz_mul_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
unsafe {
@@ -2421,7 +2421,7 @@ pub const fn _mm_maskz_mul_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vdivph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_div_ph(a: __m128h, b: __m128h) -> __m128h {
unsafe { simd_div(a, b) }
@@ -2434,7 +2434,7 @@ pub const fn _mm_div_ph(a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vdivph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask_div_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
unsafe {
@@ -2450,7 +2450,7 @@ pub const fn _mm_mask_div_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vdivph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_maskz_div_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
unsafe {
@@ -2465,7 +2465,7 @@ pub const fn _mm_maskz_div_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vdivph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_div_ph(a: __m256h, b: __m256h) -> __m256h {
unsafe { simd_div(a, b) }
@@ -2478,7 +2478,7 @@ pub const fn _mm256_div_ph(a: __m256h, b: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vdivph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_mask_div_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m256h) -> __m256h {
unsafe {
@@ -2494,7 +2494,7 @@ pub const fn _mm256_mask_div_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m25
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vdivph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_maskz_div_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h {
unsafe {
@@ -2509,7 +2509,7 @@ pub const fn _mm256_maskz_div_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vdivph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_div_ph(a: __m512h, b: __m512h) -> __m512h {
unsafe { simd_div(a, b) }
@@ -2522,7 +2522,7 @@ pub const fn _mm512_div_ph(a: __m512h, b: __m512h) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vdivph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_mask_div_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
unsafe {
@@ -2538,7 +2538,7 @@ pub const fn _mm512_mask_div_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m51
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vdivph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_maskz_div_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
unsafe {
@@ -2561,7 +2561,7 @@ pub const fn _mm512_maskz_div_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vdivph, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_div_round_ph<const ROUNDING: i32>(a: __m512h, b: __m512h) -> __m512h {
unsafe {
static_assert_rounding!(ROUNDING);
@@ -2584,7 +2584,7 @@ pub fn _mm512_div_round_ph<const ROUNDING: i32>(a: __m512h, b: __m512h) -> __m51
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vdivph, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_div_round_ph<const ROUNDING: i32>(
src: __m512h,
k: __mmask32,
@@ -2613,7 +2613,7 @@ pub fn _mm512_mask_div_round_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vdivph, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_div_round_ph<const ROUNDING: i32>(
k: __mmask32,
a: __m512h,
@@ -2641,7 +2641,7 @@ pub fn _mm512_maskz_div_round_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vdivsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_div_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m128h {
static_assert_rounding!(ROUNDING);
_mm_mask_div_round_sh::<ROUNDING>(f16x8::ZERO.as_m128h(), 0xff, a, b)
@@ -2663,7 +2663,7 @@ pub fn _mm_div_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m128h
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vdivsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_div_round_sh<const ROUNDING: i32>(
src: __m128h,
k: __mmask8,
@@ -2692,7 +2692,7 @@ pub fn _mm_mask_div_round_sh<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vdivsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_div_round_sh<const ROUNDING: i32>(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
static_assert_rounding!(ROUNDING);
_mm_mask_div_round_sh::<ROUNDING>(f16x8::ZERO.as_m128h(), k, a, b)
@@ -2705,7 +2705,7 @@ pub fn _mm_maskz_div_round_sh<const ROUNDING: i32>(k: __mmask8, a: __m128h, b: _
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vdivsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_div_sh(a: __m128h, b: __m128h) -> __m128h {
unsafe { simd_insert!(a, 0, _mm_cvtsh_h(a) / _mm_cvtsh_h(b)) }
@@ -2719,7 +2719,7 @@ pub const fn _mm_div_sh(a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vdivsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask_div_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
unsafe {
@@ -2742,7 +2742,7 @@ pub const fn _mm_mask_div_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vdivsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_maskz_div_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
unsafe {
@@ -2764,7 +2764,7 @@ pub const fn _mm_maskz_div_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mul_pch(a: __m128h, b: __m128h) -> __m128h {
_mm_mask_mul_pch(_mm_undefined_ph(), 0xff, a, b)
}
@@ -2777,7 +2777,7 @@ pub fn _mm_mul_pch(a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_mul_pch(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
unsafe { transmute(vfmulcph_128(transmute(a), transmute(b), transmute(src), k)) }
}
@@ -2790,7 +2790,7 @@ pub fn _mm_mask_mul_pch(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_mul_pch(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
_mm_mask_mul_pch(_mm_setzero_ph(), k, a, b)
}
@@ -2803,7 +2803,7 @@ pub fn _mm_maskz_mul_pch(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mul_pch(a: __m256h, b: __m256h) -> __m256h {
_mm256_mask_mul_pch(_mm256_undefined_ph(), 0xff, a, b)
}
@@ -2816,7 +2816,7 @@ pub fn _mm256_mul_pch(a: __m256h, b: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_mul_pch(src: __m256h, k: __mmask8, a: __m256h, b: __m256h) -> __m256h {
unsafe { transmute(vfmulcph_256(transmute(a), transmute(b), transmute(src), k)) }
}
@@ -2829,7 +2829,7 @@ pub fn _mm256_mask_mul_pch(src: __m256h, k: __mmask8, a: __m256h, b: __m256h) ->
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_mul_pch(k: __mmask8, a: __m256h, b: __m256h) -> __m256h {
_mm256_mask_mul_pch(_mm256_setzero_ph(), k, a, b)
}
@@ -2842,7 +2842,7 @@ pub fn _mm256_maskz_mul_pch(k: __mmask8, a: __m256h, b: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mul_pch(a: __m512h, b: __m512h) -> __m512h {
_mm512_mask_mul_pch(_mm512_undefined_ph(), 0xffff, a, b)
}
@@ -2855,7 +2855,7 @@ pub fn _mm512_mul_pch(a: __m512h, b: __m512h) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_mul_pch(src: __m512h, k: __mmask16, a: __m512h, b: __m512h) -> __m512h {
_mm512_mask_mul_round_pch::<_MM_FROUND_CUR_DIRECTION>(src, k, a, b)
}
@@ -2868,7 +2868,7 @@ pub fn _mm512_mask_mul_pch(src: __m512h, k: __mmask16, a: __m512h, b: __m512h) -
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_mul_pch(k: __mmask16, a: __m512h, b: __m512h) -> __m512h {
_mm512_mask_mul_pch(_mm512_setzero_ph(), k, a, b)
}
@@ -2890,7 +2890,7 @@ pub fn _mm512_maskz_mul_pch(k: __mmask16, a: __m512h, b: __m512h) -> __m512h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmulcph, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mul_round_pch<const ROUNDING: i32>(a: __m512h, b: __m512h) -> __m512h {
static_assert_rounding!(ROUNDING);
_mm512_mask_mul_round_pch::<ROUNDING>(_mm512_undefined_ph(), 0xffff, a, b)
@@ -2913,7 +2913,7 @@ pub fn _mm512_mul_round_pch<const ROUNDING: i32>(a: __m512h, b: __m512h) -> __m5
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmulcph, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_mul_round_pch<const ROUNDING: i32>(
src: __m512h,
k: __mmask16,
@@ -2949,7 +2949,7 @@ pub fn _mm512_mask_mul_round_pch<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmulcph, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_mul_round_pch<const ROUNDING: i32>(
k: __mmask16,
a: __m512h,
@@ -2968,7 +2968,7 @@ pub fn _mm512_maskz_mul_round_pch<const ROUNDING: i32>(
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmulcsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mul_sch(a: __m128h, b: __m128h) -> __m128h {
_mm_mask_mul_sch(f16x8::ZERO.as_m128h(), 0xff, a, b)
}
@@ -2982,7 +2982,7 @@ pub fn _mm_mul_sch(a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmulcsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_mul_sch(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
_mm_mask_mul_round_sch::<_MM_FROUND_CUR_DIRECTION>(src, k, a, b)
}
@@ -2996,7 +2996,7 @@ pub fn _mm_mask_mul_sch(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmulcsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_mul_sch(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
_mm_mask_mul_sch(f16x8::ZERO.as_m128h(), k, a, b)
}
@@ -3019,7 +3019,7 @@ pub fn _mm_maskz_mul_sch(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmulcsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mul_round_sch<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m128h {
static_assert_rounding!(ROUNDING);
_mm_mask_mul_round_sch::<ROUNDING>(f16x8::ZERO.as_m128h(), 0xff, a, b)
@@ -3043,7 +3043,7 @@ pub fn _mm_mul_round_sch<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m128h
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmulcsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_mul_round_sch<const ROUNDING: i32>(
src: __m128h,
k: __mmask8,
@@ -3080,7 +3080,7 @@ pub fn _mm_mask_mul_round_sch<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmulcsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_mul_round_sch<const ROUNDING: i32>(
k: __mmask8,
a: __m128h,
@@ -3098,7 +3098,7 @@ pub fn _mm_maskz_mul_round_sch<const ROUNDING: i32>(
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_fmul_pch(a: __m128h, b: __m128h) -> __m128h {
_mm_mul_pch(a, b)
}
@@ -3111,7 +3111,7 @@ pub fn _mm_fmul_pch(a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_fmul_pch(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
_mm_mask_mul_pch(src, k, a, b)
}
@@ -3124,7 +3124,7 @@ pub fn _mm_mask_fmul_pch(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> _
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_fmul_pch(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
_mm_maskz_mul_pch(k, a, b)
}
@@ -3137,7 +3137,7 @@ pub fn _mm_maskz_fmul_pch(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_fmul_pch(a: __m256h, b: __m256h) -> __m256h {
_mm256_mul_pch(a, b)
}
@@ -3150,7 +3150,7 @@ pub fn _mm256_fmul_pch(a: __m256h, b: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_fmul_pch(src: __m256h, k: __mmask8, a: __m256h, b: __m256h) -> __m256h {
_mm256_mask_mul_pch(src, k, a, b)
}
@@ -3163,7 +3163,7 @@ pub fn _mm256_mask_fmul_pch(src: __m256h, k: __mmask8, a: __m256h, b: __m256h) -
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_fmul_pch(k: __mmask8, a: __m256h, b: __m256h) -> __m256h {
_mm256_maskz_mul_pch(k, a, b)
}
@@ -3175,7 +3175,7 @@ pub fn _mm256_maskz_fmul_pch(k: __mmask8, a: __m256h, b: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_fmul_pch(a: __m512h, b: __m512h) -> __m512h {
_mm512_mul_pch(a, b)
}
@@ -3188,7 +3188,7 @@ pub fn _mm512_fmul_pch(a: __m512h, b: __m512h) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_fmul_pch(src: __m512h, k: __mmask16, a: __m512h, b: __m512h) -> __m512h {
_mm512_mask_mul_pch(src, k, a, b)
}
@@ -3201,7 +3201,7 @@ pub fn _mm512_mask_fmul_pch(src: __m512h, k: __mmask16, a: __m512h, b: __m512h)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_fmul_pch(k: __mmask16, a: __m512h, b: __m512h) -> __m512h {
_mm512_maskz_mul_pch(k, a, b)
}
@@ -3221,7 +3221,7 @@ pub fn _mm512_maskz_fmul_pch(k: __mmask16, a: __m512h, b: __m512h) -> __m512h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmulcph, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_fmul_round_pch<const ROUNDING: i32>(a: __m512h, b: __m512h) -> __m512h {
static_assert_rounding!(ROUNDING);
_mm512_mul_round_pch::<ROUNDING>(a, b)
@@ -3243,7 +3243,7 @@ pub fn _mm512_fmul_round_pch<const ROUNDING: i32>(a: __m512h, b: __m512h) -> __m
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmulcph, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_fmul_round_pch<const ROUNDING: i32>(
src: __m512h,
k: __mmask16,
@@ -3270,7 +3270,7 @@ pub fn _mm512_mask_fmul_round_pch<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmulcph, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_fmul_round_pch<const ROUNDING: i32>(
k: __mmask16,
a: __m512h,
@@ -3288,7 +3288,7 @@ pub fn _mm512_maskz_fmul_round_pch<const ROUNDING: i32>(
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmulcsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_fmul_sch(a: __m128h, b: __m128h) -> __m128h {
_mm_mul_sch(a, b)
}
@@ -3301,7 +3301,7 @@ pub fn _mm_fmul_sch(a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmulcsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_fmul_sch(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
_mm_mask_mul_sch(src, k, a, b)
}
@@ -3314,7 +3314,7 @@ pub fn _mm_mask_fmul_sch(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> _
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmulcsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_fmul_sch(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
_mm_maskz_mul_sch(k, a, b)
}
@@ -3335,7 +3335,7 @@ pub fn _mm_maskz_fmul_sch(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmulcsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_fmul_round_sch<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m128h {
static_assert_rounding!(ROUNDING);
_mm_mul_round_sch::<ROUNDING>(a, b)
@@ -3358,7 +3358,7 @@ pub fn _mm_fmul_round_sch<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m128
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmulcsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_fmul_round_sch<const ROUNDING: i32>(
src: __m128h,
k: __mmask8,
@@ -3386,7 +3386,7 @@ pub fn _mm_mask_fmul_round_sch<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmulcsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_fmul_round_sch<const ROUNDING: i32>(
k: __mmask8,
a: __m128h,
@@ -3405,7 +3405,7 @@ pub fn _mm_maskz_fmul_round_sch<const ROUNDING: i32>(
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfcmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cmul_pch(a: __m128h, b: __m128h) -> __m128h {
_mm_mask_cmul_pch(_mm_undefined_ph(), 0xff, a, b)
}
@@ -3419,7 +3419,7 @@ pub fn _mm_cmul_pch(a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfcmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cmul_pch(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
unsafe { transmute(vfcmulcph_128(transmute(a), transmute(b), transmute(src), k)) }
}
@@ -3433,7 +3433,7 @@ pub fn _mm_mask_cmul_pch(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> _
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfcmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cmul_pch(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
_mm_mask_cmul_pch(_mm_setzero_ph(), k, a, b)
}
@@ -3447,7 +3447,7 @@ pub fn _mm_maskz_cmul_pch(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfcmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_cmul_pch(a: __m256h, b: __m256h) -> __m256h {
_mm256_mask_cmul_pch(_mm256_undefined_ph(), 0xff, a, b)
}
@@ -3461,7 +3461,7 @@ pub fn _mm256_cmul_pch(a: __m256h, b: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfcmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_cmul_pch(src: __m256h, k: __mmask8, a: __m256h, b: __m256h) -> __m256h {
unsafe { transmute(vfcmulcph_256(transmute(a), transmute(b), transmute(src), k)) }
}
@@ -3475,7 +3475,7 @@ pub fn _mm256_mask_cmul_pch(src: __m256h, k: __mmask8, a: __m256h, b: __m256h) -
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfcmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_cmul_pch(k: __mmask8, a: __m256h, b: __m256h) -> __m256h {
_mm256_mask_cmul_pch(_mm256_setzero_ph(), k, a, b)
}
@@ -3489,7 +3489,7 @@ pub fn _mm256_maskz_cmul_pch(k: __mmask8, a: __m256h, b: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cmul_pch(a: __m512h, b: __m512h) -> __m512h {
_mm512_mask_cmul_pch(_mm512_undefined_ph(), 0xffff, a, b)
}
@@ -3503,7 +3503,7 @@ pub fn _mm512_cmul_pch(a: __m512h, b: __m512h) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cmul_pch(src: __m512h, k: __mmask16, a: __m512h, b: __m512h) -> __m512h {
_mm512_mask_cmul_round_pch::<_MM_FROUND_CUR_DIRECTION>(src, k, a, b)
}
@@ -3517,7 +3517,7 @@ pub fn _mm512_mask_cmul_pch(src: __m512h, k: __mmask16, a: __m512h, b: __m512h)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cmul_pch(k: __mmask16, a: __m512h, b: __m512h) -> __m512h {
_mm512_mask_cmul_pch(_mm512_setzero_ph(), k, a, b)
}
@@ -3540,7 +3540,7 @@ pub fn _mm512_maskz_cmul_pch(k: __mmask16, a: __m512h, b: __m512h) -> __m512h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmulcph, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cmul_round_pch<const ROUNDING: i32>(a: __m512h, b: __m512h) -> __m512h {
static_assert_rounding!(ROUNDING);
_mm512_mask_cmul_round_pch::<ROUNDING>(_mm512_undefined_ph(), 0xffff, a, b)
@@ -3564,7 +3564,7 @@ pub fn _mm512_cmul_round_pch<const ROUNDING: i32>(a: __m512h, b: __m512h) -> __m
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmulcph, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cmul_round_pch<const ROUNDING: i32>(
src: __m512h,
k: __mmask16,
@@ -3601,7 +3601,7 @@ pub fn _mm512_mask_cmul_round_pch<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmulcph, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cmul_round_pch<const ROUNDING: i32>(
k: __mmask16,
a: __m512h,
@@ -3619,7 +3619,7 @@ pub fn _mm512_maskz_cmul_round_pch<const ROUNDING: i32>(
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmulcsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cmul_sch(a: __m128h, b: __m128h) -> __m128h {
_mm_mask_cmul_sch(f16x8::ZERO.as_m128h(), 0xff, a, b)
}
@@ -3633,7 +3633,7 @@ pub fn _mm_cmul_sch(a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmulcsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cmul_sch(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
_mm_mask_cmul_round_sch::<_MM_FROUND_CUR_DIRECTION>(src, k, a, b)
}
@@ -3647,7 +3647,7 @@ pub fn _mm_mask_cmul_sch(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> _
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmulcsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cmul_sch(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
_mm_mask_cmul_sch(f16x8::ZERO.as_m128h(), k, a, b)
}
@@ -3669,7 +3669,7 @@ pub fn _mm_maskz_cmul_sch(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmulcsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cmul_round_sch<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m128h {
static_assert_rounding!(ROUNDING);
_mm_mask_cmul_round_sch::<ROUNDING>(f16x8::ZERO.as_m128h(), 0xff, a, b)
@@ -3693,7 +3693,7 @@ pub fn _mm_cmul_round_sch<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m128
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmulcsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cmul_round_sch<const ROUNDING: i32>(
src: __m128h,
k: __mmask8,
@@ -3730,7 +3730,7 @@ pub fn _mm_mask_cmul_round_sch<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmulcsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cmul_round_sch<const ROUNDING: i32>(
k: __mmask8,
a: __m128h,
@@ -3749,7 +3749,7 @@ pub fn _mm_maskz_cmul_round_sch<const ROUNDING: i32>(
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfcmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_fcmul_pch(a: __m128h, b: __m128h) -> __m128h {
_mm_cmul_pch(a, b)
}
@@ -3763,7 +3763,7 @@ pub fn _mm_fcmul_pch(a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfcmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_fcmul_pch(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
_mm_mask_cmul_pch(src, k, a, b)
}
@@ -3777,7 +3777,7 @@ pub fn _mm_mask_fcmul_pch(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) ->
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfcmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_fcmul_pch(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
_mm_maskz_cmul_pch(k, a, b)
}
@@ -3791,7 +3791,7 @@ pub fn _mm_maskz_fcmul_pch(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfcmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_fcmul_pch(a: __m256h, b: __m256h) -> __m256h {
_mm256_cmul_pch(a, b)
}
@@ -3805,7 +3805,7 @@ pub fn _mm256_fcmul_pch(a: __m256h, b: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfcmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_fcmul_pch(src: __m256h, k: __mmask8, a: __m256h, b: __m256h) -> __m256h {
_mm256_mask_cmul_pch(src, k, a, b)
}
@@ -3819,7 +3819,7 @@ pub fn _mm256_mask_fcmul_pch(src: __m256h, k: __mmask8, a: __m256h, b: __m256h)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfcmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_fcmul_pch(k: __mmask8, a: __m256h, b: __m256h) -> __m256h {
_mm256_maskz_cmul_pch(k, a, b)
}
@@ -3833,7 +3833,7 @@ pub fn _mm256_maskz_fcmul_pch(k: __mmask8, a: __m256h, b: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_fcmul_pch(a: __m512h, b: __m512h) -> __m512h {
_mm512_cmul_pch(a, b)
}
@@ -3847,7 +3847,7 @@ pub fn _mm512_fcmul_pch(a: __m512h, b: __m512h) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_fcmul_pch(src: __m512h, k: __mmask16, a: __m512h, b: __m512h) -> __m512h {
_mm512_mask_cmul_pch(src, k, a, b)
}
@@ -3861,7 +3861,7 @@ pub fn _mm512_mask_fcmul_pch(src: __m512h, k: __mmask16, a: __m512h, b: __m512h)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmulcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_fcmul_pch(k: __mmask16, a: __m512h, b: __m512h) -> __m512h {
_mm512_maskz_cmul_pch(k, a, b)
}
@@ -3883,7 +3883,7 @@ pub fn _mm512_maskz_fcmul_pch(k: __mmask16, a: __m512h, b: __m512h) -> __m512h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmulcph, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_fcmul_round_pch<const ROUNDING: i32>(a: __m512h, b: __m512h) -> __m512h {
static_assert_rounding!(ROUNDING);
_mm512_cmul_round_pch::<ROUNDING>(a, b)
@@ -3907,7 +3907,7 @@ pub fn _mm512_fcmul_round_pch<const ROUNDING: i32>(a: __m512h, b: __m512h) -> __
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmulcph, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_fcmul_round_pch<const ROUNDING: i32>(
src: __m512h,
k: __mmask16,
@@ -3936,7 +3936,7 @@ pub fn _mm512_mask_fcmul_round_pch<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmulcph, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_fcmul_round_pch<const ROUNDING: i32>(
k: __mmask16,
a: __m512h,
@@ -3955,7 +3955,7 @@ pub fn _mm512_maskz_fcmul_round_pch<const ROUNDING: i32>(
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmulcsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_fcmul_sch(a: __m128h, b: __m128h) -> __m128h {
_mm_cmul_sch(a, b)
}
@@ -3969,7 +3969,7 @@ pub fn _mm_fcmul_sch(a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmulcsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_fcmul_sch(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
_mm_mask_cmul_sch(src, k, a, b)
}
@@ -3983,7 +3983,7 @@ pub fn _mm_mask_fcmul_sch(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) ->
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmulcsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_fcmul_sch(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
_mm_maskz_cmul_sch(k, a, b)
}
@@ -4005,7 +4005,7 @@ pub fn _mm_maskz_fcmul_sch(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmulcsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_fcmul_round_sch<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m128h {
static_assert_rounding!(ROUNDING);
_mm_cmul_round_sch::<ROUNDING>(a, b)
@@ -4029,7 +4029,7 @@ pub fn _mm_fcmul_round_sch<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m12
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmulcsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_fcmul_round_sch<const ROUNDING: i32>(
src: __m128h,
k: __mmask8,
@@ -4058,7 +4058,7 @@ pub fn _mm_mask_fcmul_round_sch<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmulcsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_fcmul_round_sch<const ROUNDING: i32>(
k: __mmask8,
a: __m128h,
@@ -4074,7 +4074,7 @@ pub fn _mm_maskz_fcmul_round_sch<const ROUNDING: i32>(
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_ph)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_abs_ph(v2: __m128h) -> __m128h {
unsafe { transmute(_mm_and_si128(transmute(v2), _mm_set1_epi16(i16::MAX))) }
@@ -4086,7 +4086,7 @@ pub const fn _mm_abs_ph(v2: __m128h) -> __m128h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_abs_ph)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_abs_ph(v2: __m256h) -> __m256h {
unsafe { transmute(_mm256_and_si256(transmute(v2), _mm256_set1_epi16(i16::MAX))) }
@@ -4098,7 +4098,7 @@ pub const fn _mm256_abs_ph(v2: __m256h) -> __m256h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_abs_ph)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_abs_ph(v2: __m512h) -> __m512h {
unsafe { transmute(_mm512_and_si512(transmute(v2), _mm512_set1_epi16(i16::MAX))) }
@@ -4112,7 +4112,7 @@ pub const fn _mm512_abs_ph(v2: __m512h) -> __m512h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_conj_pch)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_conj_pch(a: __m128h) -> __m128h {
unsafe { transmute(_mm_xor_si128(transmute(a), _mm_set1_epi32(i32::MIN))) }
@@ -4126,7 +4126,7 @@ pub const fn _mm_conj_pch(a: __m128h) -> __m128h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_conj_pch)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask_conj_pch(src: __m128h, k: __mmask8, a: __m128h) -> __m128h {
unsafe {
@@ -4143,7 +4143,7 @@ pub const fn _mm_mask_conj_pch(src: __m128h, k: __mmask8, a: __m128h) -> __m128h
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_conj_pch)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_maskz_conj_pch(k: __mmask8, a: __m128h) -> __m128h {
_mm_mask_conj_pch(_mm_setzero_ph(), k, a)
@@ -4156,7 +4156,7 @@ pub const fn _mm_maskz_conj_pch(k: __mmask8, a: __m128h) -> __m128h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_conj_pch)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_conj_pch(a: __m256h) -> __m256h {
unsafe { transmute(_mm256_xor_si256(transmute(a), _mm256_set1_epi32(i32::MIN))) }
@@ -4170,7 +4170,7 @@ pub const fn _mm256_conj_pch(a: __m256h) -> __m256h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_conj_pch)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_mask_conj_pch(src: __m256h, k: __mmask8, a: __m256h) -> __m256h {
unsafe {
@@ -4187,7 +4187,7 @@ pub const fn _mm256_mask_conj_pch(src: __m256h, k: __mmask8, a: __m256h) -> __m2
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_conj_pch)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_maskz_conj_pch(k: __mmask8, a: __m256h) -> __m256h {
_mm256_mask_conj_pch(_mm256_setzero_ph(), k, a)
@@ -4200,7 +4200,7 @@ pub const fn _mm256_maskz_conj_pch(k: __mmask8, a: __m256h) -> __m256h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_conj_pch)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_conj_pch(a: __m512h) -> __m512h {
unsafe { transmute(_mm512_xor_si512(transmute(a), _mm512_set1_epi32(i32::MIN))) }
@@ -4214,7 +4214,7 @@ pub const fn _mm512_conj_pch(a: __m512h) -> __m512h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_conj_pch)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_mask_conj_pch(src: __m512h, k: __mmask16, a: __m512h) -> __m512h {
unsafe {
@@ -4231,7 +4231,7 @@ pub const fn _mm512_mask_conj_pch(src: __m512h, k: __mmask16, a: __m512h) -> __m
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_conj_pch)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_maskz_conj_pch(k: __mmask16, a: __m512h) -> __m512h {
_mm512_mask_conj_pch(_mm512_setzero_ph(), k, a)
@@ -4245,7 +4245,7 @@ pub const fn _mm512_maskz_conj_pch(k: __mmask16, a: __m512h) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmaddcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_fmadd_pch(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
_mm_mask3_fmadd_pch(a, b, c, 0xff)
}
@@ -4259,7 +4259,7 @@ pub fn _mm_fmadd_pch(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmaddcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_fmadd_pch(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h {
unsafe {
let r: __m128 = transmute(_mm_mask3_fmadd_pch(a, b, c, k)); // using `0xff` would have been fine here, but this is what CLang does
@@ -4276,7 +4276,7 @@ pub fn _mm_mask_fmadd_pch(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmaddcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask3_fmadd_pch(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h {
unsafe {
transmute(vfmaddcph_mask3_128(
@@ -4297,7 +4297,7 @@ pub fn _mm_mask3_fmadd_pch(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> _
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmaddcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_fmadd_pch(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h {
unsafe {
transmute(vfmaddcph_maskz_128(
@@ -4317,7 +4317,7 @@ pub fn _mm_maskz_fmadd_pch(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> _
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmaddcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_fmadd_pch(a: __m256h, b: __m256h, c: __m256h) -> __m256h {
_mm256_mask3_fmadd_pch(a, b, c, 0xff)
}
@@ -4331,7 +4331,7 @@ pub fn _mm256_fmadd_pch(a: __m256h, b: __m256h, c: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmaddcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_fmadd_pch(a: __m256h, k: __mmask8, b: __m256h, c: __m256h) -> __m256h {
unsafe {
let r: __m256 = transmute(_mm256_mask3_fmadd_pch(a, b, c, k)); // using `0xff` would have been fine here, but this is what Clang does
@@ -4348,7 +4348,7 @@ pub fn _mm256_mask_fmadd_pch(a: __m256h, k: __mmask8, b: __m256h, c: __m256h) ->
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmaddcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask3_fmadd_pch(a: __m256h, b: __m256h, c: __m256h, k: __mmask8) -> __m256h {
unsafe {
transmute(vfmaddcph_mask3_256(
@@ -4369,7 +4369,7 @@ pub fn _mm256_mask3_fmadd_pch(a: __m256h, b: __m256h, c: __m256h, k: __mmask8) -
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmaddcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_fmadd_pch(k: __mmask8, a: __m256h, b: __m256h, c: __m256h) -> __m256h {
unsafe {
transmute(vfmaddcph_maskz_256(
@@ -4389,7 +4389,7 @@ pub fn _mm256_maskz_fmadd_pch(k: __mmask8, a: __m256h, b: __m256h, c: __m256h) -
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmaddcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_fmadd_pch(a: __m512h, b: __m512h, c: __m512h) -> __m512h {
_mm512_fmadd_round_pch::<_MM_FROUND_CUR_DIRECTION>(a, b, c)
}
@@ -4403,7 +4403,7 @@ pub fn _mm512_fmadd_pch(a: __m512h, b: __m512h, c: __m512h) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmaddcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_fmadd_pch(a: __m512h, k: __mmask16, b: __m512h, c: __m512h) -> __m512h {
_mm512_mask_fmadd_round_pch::<_MM_FROUND_CUR_DIRECTION>(a, k, b, c)
}
@@ -4417,7 +4417,7 @@ pub fn _mm512_mask_fmadd_pch(a: __m512h, k: __mmask16, b: __m512h, c: __m512h) -
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmaddcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask3_fmadd_pch(a: __m512h, b: __m512h, c: __m512h, k: __mmask16) -> __m512h {
_mm512_mask3_fmadd_round_pch::<_MM_FROUND_CUR_DIRECTION>(a, b, c, k)
}
@@ -4431,7 +4431,7 @@ pub fn _mm512_mask3_fmadd_pch(a: __m512h, b: __m512h, c: __m512h, k: __mmask16)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmaddcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_fmadd_pch(k: __mmask16, a: __m512h, b: __m512h, c: __m512h) -> __m512h {
_mm512_maskz_fmadd_round_pch::<_MM_FROUND_CUR_DIRECTION>(k, a, b, c)
}
@@ -4453,7 +4453,7 @@ pub fn _mm512_maskz_fmadd_pch(k: __mmask16, a: __m512h, b: __m512h, c: __m512h)
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmaddcph, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_fmadd_round_pch<const ROUNDING: i32>(a: __m512h, b: __m512h, c: __m512h) -> __m512h {
static_assert_rounding!(ROUNDING);
_mm512_mask3_fmadd_round_pch::<ROUNDING>(a, b, c, 0xffff)
@@ -4477,7 +4477,7 @@ pub fn _mm512_fmadd_round_pch<const ROUNDING: i32>(a: __m512h, b: __m512h, c: __
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmaddcph, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_fmadd_round_pch<const ROUNDING: i32>(
a: __m512h,
k: __mmask16,
@@ -4509,7 +4509,7 @@ pub fn _mm512_mask_fmadd_round_pch<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmaddcph, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask3_fmadd_round_pch<const ROUNDING: i32>(
a: __m512h,
b: __m512h,
@@ -4546,7 +4546,7 @@ pub fn _mm512_mask3_fmadd_round_pch<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmaddcph, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_fmadd_round_pch<const ROUNDING: i32>(
k: __mmask16,
a: __m512h,
@@ -4574,7 +4574,7 @@ pub fn _mm512_maskz_fmadd_round_pch<const ROUNDING: i32>(
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmaddcsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_fmadd_sch(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
_mm_fmadd_round_sch::<_MM_FROUND_CUR_DIRECTION>(a, b, c)
}
@@ -4589,7 +4589,7 @@ pub fn _mm_fmadd_sch(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmaddcsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_fmadd_sch(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h {
_mm_mask_fmadd_round_sch::<_MM_FROUND_CUR_DIRECTION>(a, k, b, c)
}
@@ -4604,7 +4604,7 @@ pub fn _mm_mask_fmadd_sch(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmaddcsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask3_fmadd_sch(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h {
_mm_mask3_fmadd_round_sch::<_MM_FROUND_CUR_DIRECTION>(a, b, c, k)
}
@@ -4619,7 +4619,7 @@ pub fn _mm_mask3_fmadd_sch(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> _
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmaddcsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_fmadd_sch(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h {
_mm_maskz_fmadd_round_sch::<_MM_FROUND_CUR_DIRECTION>(k, a, b, c)
}
@@ -4641,7 +4641,7 @@ pub fn _mm_maskz_fmadd_sch(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> _
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmaddcsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_fmadd_round_sch<const ROUNDING: i32>(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
unsafe {
static_assert_rounding!(ROUNDING);
@@ -4674,7 +4674,7 @@ pub fn _mm_fmadd_round_sch<const ROUNDING: i32>(a: __m128h, b: __m128h, c: __m12
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmaddcsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_fmadd_round_sch<const ROUNDING: i32>(
a: __m128h,
k: __mmask8,
@@ -4708,7 +4708,7 @@ pub fn _mm_mask_fmadd_round_sch<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmaddcsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask3_fmadd_round_sch<const ROUNDING: i32>(
a: __m128h,
b: __m128h,
@@ -4742,7 +4742,7 @@ pub fn _mm_mask3_fmadd_round_sch<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmaddcsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_fmadd_round_sch<const ROUNDING: i32>(
k: __mmask8,
a: __m128h,
@@ -4770,7 +4770,7 @@ pub fn _mm_maskz_fmadd_round_sch<const ROUNDING: i32>(
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfcmaddcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_fcmadd_pch(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
_mm_mask3_fcmadd_pch(a, b, c, 0xff)
}
@@ -4785,7 +4785,7 @@ pub fn _mm_fcmadd_pch(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfcmaddcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_fcmadd_pch(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h {
unsafe {
let r: __m128 = transmute(_mm_mask3_fcmadd_pch(a, b, c, k)); // using `0xff` would have been fine here, but this is what Clang does
@@ -4803,7 +4803,7 @@ pub fn _mm_mask_fcmadd_pch(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> _
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfcmaddcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask3_fcmadd_pch(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h {
unsafe {
transmute(vfcmaddcph_mask3_128(
@@ -4825,7 +4825,7 @@ pub fn _mm_mask3_fcmadd_pch(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) ->
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfcmaddcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_fcmadd_pch(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h {
unsafe {
transmute(vfcmaddcph_maskz_128(
@@ -4846,7 +4846,7 @@ pub fn _mm_maskz_fcmadd_pch(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) ->
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfcmaddcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_fcmadd_pch(a: __m256h, b: __m256h, c: __m256h) -> __m256h {
_mm256_mask3_fcmadd_pch(a, b, c, 0xff)
}
@@ -4861,7 +4861,7 @@ pub fn _mm256_fcmadd_pch(a: __m256h, b: __m256h, c: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfcmaddcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_fcmadd_pch(a: __m256h, k: __mmask8, b: __m256h, c: __m256h) -> __m256h {
unsafe {
let r: __m256 = transmute(_mm256_mask3_fcmadd_pch(a, b, c, k)); // using `0xff` would have been fine here, but this is what Clang does
@@ -4879,7 +4879,7 @@ pub fn _mm256_mask_fcmadd_pch(a: __m256h, k: __mmask8, b: __m256h, c: __m256h) -
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfcmaddcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask3_fcmadd_pch(a: __m256h, b: __m256h, c: __m256h, k: __mmask8) -> __m256h {
unsafe {
transmute(vfcmaddcph_mask3_256(
@@ -4901,7 +4901,7 @@ pub fn _mm256_mask3_fcmadd_pch(a: __m256h, b: __m256h, c: __m256h, k: __mmask8)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfcmaddcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_fcmadd_pch(k: __mmask8, a: __m256h, b: __m256h, c: __m256h) -> __m256h {
unsafe {
transmute(vfcmaddcph_maskz_256(
@@ -4922,7 +4922,7 @@ pub fn _mm256_maskz_fcmadd_pch(k: __mmask8, a: __m256h, b: __m256h, c: __m256h)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmaddcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_fcmadd_pch(a: __m512h, b: __m512h, c: __m512h) -> __m512h {
_mm512_fcmadd_round_pch::<_MM_FROUND_CUR_DIRECTION>(a, b, c)
}
@@ -4937,7 +4937,7 @@ pub fn _mm512_fcmadd_pch(a: __m512h, b: __m512h, c: __m512h) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmaddcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_fcmadd_pch(a: __m512h, k: __mmask16, b: __m512h, c: __m512h) -> __m512h {
_mm512_mask_fcmadd_round_pch::<_MM_FROUND_CUR_DIRECTION>(a, k, b, c)
}
@@ -4952,7 +4952,7 @@ pub fn _mm512_mask_fcmadd_pch(a: __m512h, k: __mmask16, b: __m512h, c: __m512h)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmaddcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask3_fcmadd_pch(a: __m512h, b: __m512h, c: __m512h, k: __mmask16) -> __m512h {
_mm512_mask3_fcmadd_round_pch::<_MM_FROUND_CUR_DIRECTION>(a, b, c, k)
}
@@ -4967,7 +4967,7 @@ pub fn _mm512_mask3_fcmadd_pch(a: __m512h, b: __m512h, c: __m512h, k: __mmask16)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmaddcph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_fcmadd_pch(k: __mmask16, a: __m512h, b: __m512h, c: __m512h) -> __m512h {
_mm512_maskz_fcmadd_round_pch::<_MM_FROUND_CUR_DIRECTION>(k, a, b, c)
}
@@ -4990,7 +4990,7 @@ pub fn _mm512_maskz_fcmadd_pch(k: __mmask16, a: __m512h, b: __m512h, c: __m512h)
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmaddcph, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_fcmadd_round_pch<const ROUNDING: i32>(a: __m512h, b: __m512h, c: __m512h) -> __m512h {
static_assert_rounding!(ROUNDING);
_mm512_mask3_fcmadd_round_pch::<ROUNDING>(a, b, c, 0xffff)
@@ -5015,7 +5015,7 @@ pub fn _mm512_fcmadd_round_pch<const ROUNDING: i32>(a: __m512h, b: __m512h, c: _
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmaddcph, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_fcmadd_round_pch<const ROUNDING: i32>(
a: __m512h,
k: __mmask16,
@@ -5048,7 +5048,7 @@ pub fn _mm512_mask_fcmadd_round_pch<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmaddcph, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask3_fcmadd_round_pch<const ROUNDING: i32>(
a: __m512h,
b: __m512h,
@@ -5086,7 +5086,7 @@ pub fn _mm512_mask3_fcmadd_round_pch<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmaddcph, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_fcmadd_round_pch<const ROUNDING: i32>(
k: __mmask16,
a: __m512h,
@@ -5115,7 +5115,7 @@ pub fn _mm512_maskz_fcmadd_round_pch<const ROUNDING: i32>(
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmaddcsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_fcmadd_sch(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
_mm_fcmadd_round_sch::<_MM_FROUND_CUR_DIRECTION>(a, b, c)
}
@@ -5131,7 +5131,7 @@ pub fn _mm_fcmadd_sch(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmaddcsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_fcmadd_sch(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h {
_mm_mask_fcmadd_round_sch::<_MM_FROUND_CUR_DIRECTION>(a, k, b, c)
}
@@ -5147,7 +5147,7 @@ pub fn _mm_mask_fcmadd_sch(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> _
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmaddcsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask3_fcmadd_sch(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h {
_mm_mask3_fcmadd_round_sch::<_MM_FROUND_CUR_DIRECTION>(a, b, c, k)
}
@@ -5163,7 +5163,7 @@ pub fn _mm_mask3_fcmadd_sch(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) ->
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmaddcsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_fcmadd_sch(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h {
_mm_maskz_fcmadd_round_sch::<_MM_FROUND_CUR_DIRECTION>(k, a, b, c)
}
@@ -5187,7 +5187,7 @@ pub fn _mm_maskz_fcmadd_sch(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) ->
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmaddcsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_fcmadd_round_sch<const ROUNDING: i32>(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
unsafe {
static_assert_rounding!(ROUNDING);
@@ -5221,7 +5221,7 @@ pub fn _mm_fcmadd_round_sch<const ROUNDING: i32>(a: __m128h, b: __m128h, c: __m1
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmaddcsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_fcmadd_round_sch<const ROUNDING: i32>(
a: __m128h,
k: __mmask8,
@@ -5256,7 +5256,7 @@ pub fn _mm_mask_fcmadd_round_sch<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmaddcsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask3_fcmadd_round_sch<const ROUNDING: i32>(
a: __m128h,
b: __m128h,
@@ -5291,7 +5291,7 @@ pub fn _mm_mask3_fcmadd_round_sch<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfcmaddcsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_fcmadd_round_sch<const ROUNDING: i32>(
k: __mmask8,
a: __m128h,
@@ -5317,7 +5317,7 @@ pub fn _mm_maskz_fcmadd_round_sch<const ROUNDING: i32>(
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_fmadd_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
unsafe { simd_fma(a, b, c) }
@@ -5331,7 +5331,7 @@ pub const fn _mm_fmadd_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask_fmadd_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h {
unsafe { simd_select_bitmask(k, _mm_fmadd_ph(a, b, c), a) }
@@ -5345,7 +5345,7 @@ pub const fn _mm_mask_fmadd_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask3_fmadd_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h {
unsafe { simd_select_bitmask(k, _mm_fmadd_ph(a, b, c), c) }
@@ -5359,7 +5359,7 @@ pub const fn _mm_mask3_fmadd_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_maskz_fmadd_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h {
unsafe { simd_select_bitmask(k, _mm_fmadd_ph(a, b, c), _mm_setzero_ph()) }
@@ -5372,7 +5372,7 @@ pub const fn _mm_maskz_fmadd_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_fmadd_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h {
unsafe { simd_fma(a, b, c) }
@@ -5386,7 +5386,7 @@ pub const fn _mm256_fmadd_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_mask_fmadd_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m256h) -> __m256h {
unsafe { simd_select_bitmask(k, _mm256_fmadd_ph(a, b, c), a) }
@@ -5400,7 +5400,7 @@ pub const fn _mm256_mask_fmadd_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m25
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_mask3_fmadd_ph(a: __m256h, b: __m256h, c: __m256h, k: __mmask16) -> __m256h {
unsafe { simd_select_bitmask(k, _mm256_fmadd_ph(a, b, c), c) }
@@ -5414,7 +5414,7 @@ pub const fn _mm256_mask3_fmadd_ph(a: __m256h, b: __m256h, c: __m256h, k: __mmas
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_maskz_fmadd_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m256h) -> __m256h {
unsafe { simd_select_bitmask(k, _mm256_fmadd_ph(a, b, c), _mm256_setzero_ph()) }
@@ -5427,7 +5427,7 @@ pub const fn _mm256_maskz_fmadd_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m2
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_fmadd_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h {
unsafe { simd_fma(a, b, c) }
@@ -5441,7 +5441,7 @@ pub const fn _mm512_fmadd_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_mask_fmadd_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m512h) -> __m512h {
unsafe { simd_select_bitmask(k, _mm512_fmadd_ph(a, b, c), a) }
@@ -5455,7 +5455,7 @@ pub const fn _mm512_mask_fmadd_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m51
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_mask3_fmadd_ph(a: __m512h, b: __m512h, c: __m512h, k: __mmask32) -> __m512h {
unsafe { simd_select_bitmask(k, _mm512_fmadd_ph(a, b, c), c) }
@@ -5469,7 +5469,7 @@ pub const fn _mm512_mask3_fmadd_ph(a: __m512h, b: __m512h, c: __m512h, k: __mmas
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_maskz_fmadd_ph(k: __mmask32, a: __m512h, b: __m512h, c: __m512h) -> __m512h {
unsafe { simd_select_bitmask(k, _mm512_fmadd_ph(a, b, c), _mm512_setzero_ph()) }
@@ -5491,7 +5491,7 @@ pub const fn _mm512_maskz_fmadd_ph(k: __mmask32, a: __m512h, b: __m512h, c: __m5
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_fmadd_round_ph<const ROUNDING: i32>(a: __m512h, b: __m512h, c: __m512h) -> __m512h {
unsafe {
static_assert_rounding!(ROUNDING);
@@ -5516,7 +5516,7 @@ pub fn _mm512_fmadd_round_ph<const ROUNDING: i32>(a: __m512h, b: __m512h, c: __m
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_fmadd_round_ph<const ROUNDING: i32>(
a: __m512h,
k: __mmask32,
@@ -5546,7 +5546,7 @@ pub fn _mm512_mask_fmadd_round_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask3_fmadd_round_ph<const ROUNDING: i32>(
a: __m512h,
b: __m512h,
@@ -5576,7 +5576,7 @@ pub fn _mm512_mask3_fmadd_round_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_fmadd_round_ph<const ROUNDING: i32>(
k: __mmask32,
a: __m512h,
@@ -5601,7 +5601,7 @@ pub fn _mm512_maskz_fmadd_round_ph<const ROUNDING: i32>(
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_fmadd_sh(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
unsafe {
@@ -5622,7 +5622,7 @@ pub const fn _mm_fmadd_sh(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask_fmadd_sh(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h {
unsafe {
@@ -5645,7 +5645,7 @@ pub const fn _mm_mask_fmadd_sh(a: __m128h, k: __mmask8, b: __m128h, c: __m128h)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask3_fmadd_sh(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h {
unsafe {
@@ -5668,7 +5668,7 @@ pub const fn _mm_mask3_fmadd_sh(a: __m128h, b: __m128h, c: __m128h, k: __mmask8)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_maskz_fmadd_sh(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h {
unsafe {
@@ -5700,7 +5700,7 @@ pub const fn _mm_maskz_fmadd_sh(k: __mmask8, a: __m128h, b: __m128h, c: __m128h)
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_fmadd_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
unsafe {
static_assert_rounding!(ROUNDING);
@@ -5730,7 +5730,7 @@ pub fn _mm_fmadd_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h, c: __m128
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_fmadd_round_sh<const ROUNDING: i32>(
a: __m128h,
k: __mmask8,
@@ -5767,7 +5767,7 @@ pub fn _mm_mask_fmadd_round_sh<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask3_fmadd_round_sh<const ROUNDING: i32>(
a: __m128h,
b: __m128h,
@@ -5804,7 +5804,7 @@ pub fn _mm_mask3_fmadd_round_sh<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_fmadd_round_sh<const ROUNDING: i32>(
k: __mmask8,
a: __m128h,
@@ -5832,7 +5832,7 @@ pub fn _mm_maskz_fmadd_round_sh<const ROUNDING: i32>(
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_fmsub_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
unsafe { simd_fma(a, b, simd_neg(c)) }
@@ -5846,7 +5846,7 @@ pub const fn _mm_fmsub_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask_fmsub_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h {
unsafe { simd_select_bitmask(k, _mm_fmsub_ph(a, b, c), a) }
@@ -5860,7 +5860,7 @@ pub const fn _mm_mask_fmsub_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask3_fmsub_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h {
unsafe { simd_select_bitmask(k, _mm_fmsub_ph(a, b, c), c) }
@@ -5874,7 +5874,7 @@ pub const fn _mm_mask3_fmsub_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_maskz_fmsub_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h {
unsafe { simd_select_bitmask(k, _mm_fmsub_ph(a, b, c), _mm_setzero_ph()) }
@@ -5887,7 +5887,7 @@ pub const fn _mm_maskz_fmsub_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_fmsub_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h {
unsafe { simd_fma(a, b, simd_neg(c)) }
@@ -5901,7 +5901,7 @@ pub const fn _mm256_fmsub_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_mask_fmsub_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m256h) -> __m256h {
unsafe { simd_select_bitmask(k, _mm256_fmsub_ph(a, b, c), a) }
@@ -5915,7 +5915,7 @@ pub const fn _mm256_mask_fmsub_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m25
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_mask3_fmsub_ph(a: __m256h, b: __m256h, c: __m256h, k: __mmask16) -> __m256h {
unsafe { simd_select_bitmask(k, _mm256_fmsub_ph(a, b, c), c) }
@@ -5929,7 +5929,7 @@ pub const fn _mm256_mask3_fmsub_ph(a: __m256h, b: __m256h, c: __m256h, k: __mmas
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_maskz_fmsub_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m256h) -> __m256h {
unsafe { simd_select_bitmask(k, _mm256_fmsub_ph(a, b, c), _mm256_setzero_ph()) }
@@ -5942,7 +5942,7 @@ pub const fn _mm256_maskz_fmsub_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m2
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_fmsub_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h {
unsafe { simd_fma(a, b, simd_neg(c)) }
@@ -5956,7 +5956,7 @@ pub const fn _mm512_fmsub_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_mask_fmsub_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m512h) -> __m512h {
unsafe { simd_select_bitmask(k, _mm512_fmsub_ph(a, b, c), a) }
@@ -5970,7 +5970,7 @@ pub const fn _mm512_mask_fmsub_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m51
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_mask3_fmsub_ph(a: __m512h, b: __m512h, c: __m512h, k: __mmask32) -> __m512h {
unsafe { simd_select_bitmask(k, _mm512_fmsub_ph(a, b, c), c) }
@@ -5984,7 +5984,7 @@ pub const fn _mm512_mask3_fmsub_ph(a: __m512h, b: __m512h, c: __m512h, k: __mmas
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_maskz_fmsub_ph(k: __mmask32, a: __m512h, b: __m512h, c: __m512h) -> __m512h {
unsafe { simd_select_bitmask(k, _mm512_fmsub_ph(a, b, c), _mm512_setzero_ph()) }
@@ -6006,7 +6006,7 @@ pub const fn _mm512_maskz_fmsub_ph(k: __mmask32, a: __m512h, b: __m512h, c: __m5
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_fmsub_round_ph<const ROUNDING: i32>(a: __m512h, b: __m512h, c: __m512h) -> __m512h {
unsafe {
static_assert_rounding!(ROUNDING);
@@ -6031,7 +6031,7 @@ pub fn _mm512_fmsub_round_ph<const ROUNDING: i32>(a: __m512h, b: __m512h, c: __m
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_fmsub_round_ph<const ROUNDING: i32>(
a: __m512h,
k: __mmask32,
@@ -6061,7 +6061,7 @@ pub fn _mm512_mask_fmsub_round_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask3_fmsub_round_ph<const ROUNDING: i32>(
a: __m512h,
b: __m512h,
@@ -6091,7 +6091,7 @@ pub fn _mm512_mask3_fmsub_round_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_fmsub_round_ph<const ROUNDING: i32>(
k: __mmask32,
a: __m512h,
@@ -6116,7 +6116,7 @@ pub fn _mm512_maskz_fmsub_round_ph<const ROUNDING: i32>(
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_fmsub_sh(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
unsafe {
@@ -6137,7 +6137,7 @@ pub const fn _mm_fmsub_sh(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask_fmsub_sh(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h {
unsafe {
@@ -6160,7 +6160,7 @@ pub const fn _mm_mask_fmsub_sh(a: __m128h, k: __mmask8, b: __m128h, c: __m128h)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask3_fmsub_sh(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h {
unsafe {
@@ -6183,7 +6183,7 @@ pub const fn _mm_mask3_fmsub_sh(a: __m128h, b: __m128h, c: __m128h, k: __mmask8)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_maskz_fmsub_sh(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h {
unsafe {
@@ -6215,7 +6215,7 @@ pub const fn _mm_maskz_fmsub_sh(k: __mmask8, a: __m128h, b: __m128h, c: __m128h)
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_fmsub_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
unsafe {
static_assert_rounding!(ROUNDING);
@@ -6245,7 +6245,7 @@ pub fn _mm_fmsub_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h, c: __m128
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_fmsub_round_sh<const ROUNDING: i32>(
a: __m128h,
k: __mmask8,
@@ -6282,7 +6282,7 @@ pub fn _mm_mask_fmsub_round_sh<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask3_fmsub_round_sh<const ROUNDING: i32>(
a: __m128h,
b: __m128h,
@@ -6311,7 +6311,7 @@ pub fn _mm_mask3_fmsub_round_sh<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_fmsub_round_sh<const ROUNDING: i32>(
k: __mmask8,
a: __m128h,
@@ -6338,7 +6338,7 @@ pub fn _mm_maskz_fmsub_round_sh<const ROUNDING: i32>(
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_fnmadd_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
unsafe { simd_fma(simd_neg(a), b, c) }
@@ -6352,7 +6352,7 @@ pub const fn _mm_fnmadd_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask_fnmadd_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h {
unsafe { simd_select_bitmask(k, _mm_fnmadd_ph(a, b, c), a) }
@@ -6366,7 +6366,7 @@ pub const fn _mm_mask_fnmadd_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask3_fnmadd_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h {
unsafe { simd_select_bitmask(k, _mm_fnmadd_ph(a, b, c), c) }
@@ -6380,7 +6380,7 @@ pub const fn _mm_mask3_fnmadd_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_maskz_fnmadd_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h {
unsafe { simd_select_bitmask(k, _mm_fnmadd_ph(a, b, c), _mm_setzero_ph()) }
@@ -6393,7 +6393,7 @@ pub const fn _mm_maskz_fnmadd_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_fnmadd_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h {
unsafe { simd_fma(simd_neg(a), b, c) }
@@ -6407,7 +6407,7 @@ pub const fn _mm256_fnmadd_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_mask_fnmadd_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m256h) -> __m256h {
unsafe { simd_select_bitmask(k, _mm256_fnmadd_ph(a, b, c), a) }
@@ -6421,7 +6421,7 @@ pub const fn _mm256_mask_fnmadd_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m2
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_mask3_fnmadd_ph(a: __m256h, b: __m256h, c: __m256h, k: __mmask16) -> __m256h {
unsafe { simd_select_bitmask(k, _mm256_fnmadd_ph(a, b, c), c) }
@@ -6435,7 +6435,7 @@ pub const fn _mm256_mask3_fnmadd_ph(a: __m256h, b: __m256h, c: __m256h, k: __mma
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_maskz_fnmadd_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m256h) -> __m256h {
unsafe { simd_select_bitmask(k, _mm256_fnmadd_ph(a, b, c), _mm256_setzero_ph()) }
@@ -6448,7 +6448,7 @@ pub const fn _mm256_maskz_fnmadd_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_fnmadd_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h {
unsafe { simd_fma(simd_neg(a), b, c) }
@@ -6462,7 +6462,7 @@ pub const fn _mm512_fnmadd_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_mask_fnmadd_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m512h) -> __m512h {
unsafe { simd_select_bitmask(k, _mm512_fnmadd_ph(a, b, c), a) }
@@ -6476,7 +6476,7 @@ pub const fn _mm512_mask_fnmadd_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m5
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_mask3_fnmadd_ph(a: __m512h, b: __m512h, c: __m512h, k: __mmask32) -> __m512h {
unsafe { simd_select_bitmask(k, _mm512_fnmadd_ph(a, b, c), c) }
@@ -6490,7 +6490,7 @@ pub const fn _mm512_mask3_fnmadd_ph(a: __m512h, b: __m512h, c: __m512h, k: __mma
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_maskz_fnmadd_ph(k: __mmask32, a: __m512h, b: __m512h, c: __m512h) -> __m512h {
unsafe { simd_select_bitmask(k, _mm512_fnmadd_ph(a, b, c), _mm512_setzero_ph()) }
@@ -6512,7 +6512,7 @@ pub const fn _mm512_maskz_fnmadd_ph(k: __mmask32, a: __m512h, b: __m512h, c: __m
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_fnmadd_round_ph<const ROUNDING: i32>(a: __m512h, b: __m512h, c: __m512h) -> __m512h {
unsafe {
static_assert_rounding!(ROUNDING);
@@ -6537,7 +6537,7 @@ pub fn _mm512_fnmadd_round_ph<const ROUNDING: i32>(a: __m512h, b: __m512h, c: __
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_fnmadd_round_ph<const ROUNDING: i32>(
a: __m512h,
k: __mmask32,
@@ -6567,7 +6567,7 @@ pub fn _mm512_mask_fnmadd_round_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask3_fnmadd_round_ph<const ROUNDING: i32>(
a: __m512h,
b: __m512h,
@@ -6597,7 +6597,7 @@ pub fn _mm512_mask3_fnmadd_round_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_fnmadd_round_ph<const ROUNDING: i32>(
k: __mmask32,
a: __m512h,
@@ -6622,7 +6622,7 @@ pub fn _mm512_maskz_fnmadd_round_ph<const ROUNDING: i32>(
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_fnmadd_sh(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
unsafe {
@@ -6643,7 +6643,7 @@ pub const fn _mm_fnmadd_sh(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask_fnmadd_sh(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h {
unsafe {
@@ -6666,7 +6666,7 @@ pub const fn _mm_mask_fnmadd_sh(a: __m128h, k: __mmask8, b: __m128h, c: __m128h)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask3_fnmadd_sh(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h {
unsafe {
@@ -6689,7 +6689,7 @@ pub const fn _mm_mask3_fnmadd_sh(a: __m128h, b: __m128h, c: __m128h, k: __mmask8
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_maskz_fnmadd_sh(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h {
unsafe {
@@ -6721,7 +6721,7 @@ pub const fn _mm_maskz_fnmadd_sh(k: __mmask8, a: __m128h, b: __m128h, c: __m128h
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_fnmadd_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
unsafe {
static_assert_rounding!(ROUNDING);
@@ -6751,7 +6751,7 @@ pub fn _mm_fnmadd_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h, c: __m12
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_fnmadd_round_sh<const ROUNDING: i32>(
a: __m128h,
k: __mmask8,
@@ -6788,7 +6788,7 @@ pub fn _mm_mask_fnmadd_round_sh<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask3_fnmadd_round_sh<const ROUNDING: i32>(
a: __m128h,
b: __m128h,
@@ -6825,7 +6825,7 @@ pub fn _mm_mask3_fnmadd_round_sh<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_fnmadd_round_sh<const ROUNDING: i32>(
k: __mmask8,
a: __m128h,
@@ -6852,7 +6852,7 @@ pub fn _mm_maskz_fnmadd_round_sh<const ROUNDING: i32>(
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_fnmsub_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
unsafe { simd_fma(simd_neg(a), b, simd_neg(c)) }
@@ -6866,7 +6866,7 @@ pub const fn _mm_fnmsub_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask_fnmsub_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h {
unsafe { simd_select_bitmask(k, _mm_fnmsub_ph(a, b, c), a) }
@@ -6880,7 +6880,7 @@ pub const fn _mm_mask_fnmsub_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask3_fnmsub_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h {
unsafe { simd_select_bitmask(k, _mm_fnmsub_ph(a, b, c), c) }
@@ -6894,7 +6894,7 @@ pub const fn _mm_mask3_fnmsub_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_maskz_fnmsub_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h {
unsafe { simd_select_bitmask(k, _mm_fnmsub_ph(a, b, c), _mm_setzero_ph()) }
@@ -6907,7 +6907,7 @@ pub const fn _mm_maskz_fnmsub_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_fnmsub_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h {
unsafe { simd_fma(simd_neg(a), b, simd_neg(c)) }
@@ -6921,7 +6921,7 @@ pub const fn _mm256_fnmsub_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_mask_fnmsub_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m256h) -> __m256h {
unsafe { simd_select_bitmask(k, _mm256_fnmsub_ph(a, b, c), a) }
@@ -6935,7 +6935,7 @@ pub const fn _mm256_mask_fnmsub_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m2
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_mask3_fnmsub_ph(a: __m256h, b: __m256h, c: __m256h, k: __mmask16) -> __m256h {
unsafe { simd_select_bitmask(k, _mm256_fnmsub_ph(a, b, c), c) }
@@ -6949,7 +6949,7 @@ pub const fn _mm256_mask3_fnmsub_ph(a: __m256h, b: __m256h, c: __m256h, k: __mma
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_maskz_fnmsub_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m256h) -> __m256h {
unsafe { simd_select_bitmask(k, _mm256_fnmsub_ph(a, b, c), _mm256_setzero_ph()) }
@@ -6962,7 +6962,7 @@ pub const fn _mm256_maskz_fnmsub_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_fnmsub_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h {
unsafe { simd_fma(simd_neg(a), b, simd_neg(c)) }
@@ -6976,7 +6976,7 @@ pub const fn _mm512_fnmsub_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_mask_fnmsub_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m512h) -> __m512h {
unsafe { simd_select_bitmask(k, _mm512_fnmsub_ph(a, b, c), a) }
@@ -6990,7 +6990,7 @@ pub const fn _mm512_mask_fnmsub_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m5
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_mask3_fnmsub_ph(a: __m512h, b: __m512h, c: __m512h, k: __mmask32) -> __m512h {
unsafe { simd_select_bitmask(k, _mm512_fnmsub_ph(a, b, c), c) }
@@ -7004,7 +7004,7 @@ pub const fn _mm512_mask3_fnmsub_ph(a: __m512h, b: __m512h, c: __m512h, k: __mma
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_maskz_fnmsub_ph(k: __mmask32, a: __m512h, b: __m512h, c: __m512h) -> __m512h {
unsafe { simd_select_bitmask(k, _mm512_fnmsub_ph(a, b, c), _mm512_setzero_ph()) }
@@ -7026,7 +7026,7 @@ pub const fn _mm512_maskz_fnmsub_ph(k: __mmask32, a: __m512h, b: __m512h, c: __m
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_fnmsub_round_ph<const ROUNDING: i32>(a: __m512h, b: __m512h, c: __m512h) -> __m512h {
unsafe {
static_assert_rounding!(ROUNDING);
@@ -7051,7 +7051,7 @@ pub fn _mm512_fnmsub_round_ph<const ROUNDING: i32>(a: __m512h, b: __m512h, c: __
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_fnmsub_round_ph<const ROUNDING: i32>(
a: __m512h,
k: __mmask32,
@@ -7081,7 +7081,7 @@ pub fn _mm512_mask_fnmsub_round_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask3_fnmsub_round_ph<const ROUNDING: i32>(
a: __m512h,
b: __m512h,
@@ -7111,7 +7111,7 @@ pub fn _mm512_mask3_fnmsub_round_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_fnmsub_round_ph<const ROUNDING: i32>(
k: __mmask32,
a: __m512h,
@@ -7136,7 +7136,7 @@ pub fn _mm512_maskz_fnmsub_round_ph<const ROUNDING: i32>(
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_fnmsub_sh(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
unsafe {
@@ -7157,7 +7157,7 @@ pub const fn _mm_fnmsub_sh(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask_fnmsub_sh(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h {
unsafe {
@@ -7180,7 +7180,7 @@ pub const fn _mm_mask_fnmsub_sh(a: __m128h, k: __mmask8, b: __m128h, c: __m128h)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask3_fnmsub_sh(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h {
unsafe {
@@ -7203,7 +7203,7 @@ pub const fn _mm_mask3_fnmsub_sh(a: __m128h, b: __m128h, c: __m128h, k: __mmask8
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_maskz_fnmsub_sh(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h {
unsafe {
@@ -7235,7 +7235,7 @@ pub const fn _mm_maskz_fnmsub_sh(k: __mmask8, a: __m128h, b: __m128h, c: __m128h
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_fnmsub_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
unsafe {
static_assert_rounding!(ROUNDING);
@@ -7265,7 +7265,7 @@ pub fn _mm_fnmsub_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h, c: __m12
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_fnmsub_round_sh<const ROUNDING: i32>(
a: __m128h,
k: __mmask8,
@@ -7302,7 +7302,7 @@ pub fn _mm_mask_fnmsub_round_sh<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask3_fnmsub_round_sh<const ROUNDING: i32>(
a: __m128h,
b: __m128h,
@@ -7339,7 +7339,7 @@ pub fn _mm_mask3_fnmsub_round_sh<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_fnmsub_round_sh<const ROUNDING: i32>(
k: __mmask8,
a: __m128h,
@@ -7366,7 +7366,7 @@ pub fn _mm_maskz_fnmsub_round_sh<const ROUNDING: i32>(
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmaddsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_fmaddsub_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
unsafe {
@@ -7384,7 +7384,7 @@ pub const fn _mm_fmaddsub_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmaddsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask_fmaddsub_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h {
unsafe { simd_select_bitmask(k, _mm_fmaddsub_ph(a, b, c), a) }
@@ -7398,7 +7398,7 @@ pub const fn _mm_mask_fmaddsub_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmaddsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask3_fmaddsub_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h {
unsafe { simd_select_bitmask(k, _mm_fmaddsub_ph(a, b, c), c) }
@@ -7412,7 +7412,7 @@ pub const fn _mm_mask3_fmaddsub_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmas
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmaddsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_maskz_fmaddsub_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h {
unsafe { simd_select_bitmask(k, _mm_fmaddsub_ph(a, b, c), _mm_setzero_ph()) }
@@ -7425,7 +7425,7 @@ pub const fn _mm_maskz_fmaddsub_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m12
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmaddsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_fmaddsub_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h {
unsafe {
@@ -7447,7 +7447,7 @@ pub const fn _mm256_fmaddsub_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmaddsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_mask_fmaddsub_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m256h) -> __m256h {
unsafe { simd_select_bitmask(k, _mm256_fmaddsub_ph(a, b, c), a) }
@@ -7461,7 +7461,7 @@ pub const fn _mm256_mask_fmaddsub_ph(a: __m256h, k: __mmask16, b: __m256h, c: __
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmaddsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_mask3_fmaddsub_ph(a: __m256h, b: __m256h, c: __m256h, k: __mmask16) -> __m256h {
unsafe { simd_select_bitmask(k, _mm256_fmaddsub_ph(a, b, c), c) }
@@ -7475,7 +7475,7 @@ pub const fn _mm256_mask3_fmaddsub_ph(a: __m256h, b: __m256h, c: __m256h, k: __m
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmaddsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_maskz_fmaddsub_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m256h) -> __m256h {
unsafe { simd_select_bitmask(k, _mm256_fmaddsub_ph(a, b, c), _mm256_setzero_ph()) }
@@ -7488,7 +7488,7 @@ pub const fn _mm256_maskz_fmaddsub_ph(k: __mmask16, a: __m256h, b: __m256h, c: _
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmaddsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_fmaddsub_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h {
unsafe {
@@ -7513,7 +7513,7 @@ pub const fn _mm512_fmaddsub_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmaddsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_mask_fmaddsub_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m512h) -> __m512h {
unsafe { simd_select_bitmask(k, _mm512_fmaddsub_ph(a, b, c), a) }
@@ -7527,7 +7527,7 @@ pub const fn _mm512_mask_fmaddsub_ph(a: __m512h, k: __mmask32, b: __m512h, c: __
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmaddsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_mask3_fmaddsub_ph(a: __m512h, b: __m512h, c: __m512h, k: __mmask32) -> __m512h {
unsafe { simd_select_bitmask(k, _mm512_fmaddsub_ph(a, b, c), c) }
@@ -7541,7 +7541,7 @@ pub const fn _mm512_mask3_fmaddsub_ph(a: __m512h, b: __m512h, c: __m512h, k: __m
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmaddsub))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_maskz_fmaddsub_ph(k: __mmask32, a: __m512h, b: __m512h, c: __m512h) -> __m512h {
unsafe { simd_select_bitmask(k, _mm512_fmaddsub_ph(a, b, c), _mm512_setzero_ph()) }
@@ -7563,7 +7563,7 @@ pub const fn _mm512_maskz_fmaddsub_ph(k: __mmask32, a: __m512h, b: __m512h, c: _
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_fmaddsub_round_ph<const ROUNDING: i32>(
a: __m512h,
b: __m512h,
@@ -7592,7 +7592,7 @@ pub fn _mm512_fmaddsub_round_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_fmaddsub_round_ph<const ROUNDING: i32>(
a: __m512h,
k: __mmask32,
@@ -7622,7 +7622,7 @@ pub fn _mm512_mask_fmaddsub_round_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask3_fmaddsub_round_ph<const ROUNDING: i32>(
a: __m512h,
b: __m512h,
@@ -7652,7 +7652,7 @@ pub fn _mm512_mask3_fmaddsub_round_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_fmaddsub_round_ph<const ROUNDING: i32>(
k: __mmask32,
a: __m512h,
@@ -7676,7 +7676,7 @@ pub fn _mm512_maskz_fmaddsub_round_ph<const ROUNDING: i32>(
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsubadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_fmsubadd_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
_mm_fmaddsub_ph(a, b, unsafe { simd_neg(c) })
@@ -7690,7 +7690,7 @@ pub const fn _mm_fmsubadd_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsubadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask_fmsubadd_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h {
unsafe { simd_select_bitmask(k, _mm_fmsubadd_ph(a, b, c), a) }
@@ -7704,7 +7704,7 @@ pub const fn _mm_mask_fmsubadd_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsubadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask3_fmsubadd_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h {
unsafe { simd_select_bitmask(k, _mm_fmsubadd_ph(a, b, c), c) }
@@ -7718,7 +7718,7 @@ pub const fn _mm_mask3_fmsubadd_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmas
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsubadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_maskz_fmsubadd_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h {
unsafe { simd_select_bitmask(k, _mm_fmsubadd_ph(a, b, c), _mm_setzero_ph()) }
@@ -7731,7 +7731,7 @@ pub const fn _mm_maskz_fmsubadd_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m12
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsubadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_fmsubadd_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h {
_mm256_fmaddsub_ph(a, b, unsafe { simd_neg(c) })
@@ -7745,7 +7745,7 @@ pub const fn _mm256_fmsubadd_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsubadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_mask_fmsubadd_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m256h) -> __m256h {
unsafe { simd_select_bitmask(k, _mm256_fmsubadd_ph(a, b, c), a) }
@@ -7759,7 +7759,7 @@ pub const fn _mm256_mask_fmsubadd_ph(a: __m256h, k: __mmask16, b: __m256h, c: __
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsubadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_mask3_fmsubadd_ph(a: __m256h, b: __m256h, c: __m256h, k: __mmask16) -> __m256h {
unsafe { simd_select_bitmask(k, _mm256_fmsubadd_ph(a, b, c), c) }
@@ -7773,7 +7773,7 @@ pub const fn _mm256_mask3_fmsubadd_ph(a: __m256h, b: __m256h, c: __m256h, k: __m
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsubadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_maskz_fmsubadd_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m256h) -> __m256h {
unsafe { simd_select_bitmask(k, _mm256_fmsubadd_ph(a, b, c), _mm256_setzero_ph()) }
@@ -7786,7 +7786,7 @@ pub const fn _mm256_maskz_fmsubadd_ph(k: __mmask16, a: __m256h, b: __m256h, c: _
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmsubadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_fmsubadd_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h {
_mm512_fmaddsub_ph(a, b, unsafe { simd_neg(c) })
@@ -7800,7 +7800,7 @@ pub const fn _mm512_fmsubadd_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmsubadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_mask_fmsubadd_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m512h) -> __m512h {
unsafe { simd_select_bitmask(k, _mm512_fmsubadd_ph(a, b, c), a) }
@@ -7814,7 +7814,7 @@ pub const fn _mm512_mask_fmsubadd_ph(a: __m512h, k: __mmask32, b: __m512h, c: __
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmsubadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_mask3_fmsubadd_ph(a: __m512h, b: __m512h, c: __m512h, k: __mmask32) -> __m512h {
unsafe { simd_select_bitmask(k, _mm512_fmsubadd_ph(a, b, c), c) }
@@ -7828,7 +7828,7 @@ pub const fn _mm512_mask3_fmsubadd_ph(a: __m512h, b: __m512h, c: __m512h, k: __m
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmsubadd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_maskz_fmsubadd_ph(k: __mmask32, a: __m512h, b: __m512h, c: __m512h) -> __m512h {
unsafe { simd_select_bitmask(k, _mm512_fmsubadd_ph(a, b, c), _mm512_setzero_ph()) }
@@ -7850,7 +7850,7 @@ pub const fn _mm512_maskz_fmsubadd_ph(k: __mmask32, a: __m512h, b: __m512h, c: _
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmsubadd, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_fmsubadd_round_ph<const ROUNDING: i32>(
a: __m512h,
b: __m512h,
@@ -7879,7 +7879,7 @@ pub fn _mm512_fmsubadd_round_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmsubadd, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_fmsubadd_round_ph<const ROUNDING: i32>(
a: __m512h,
k: __mmask32,
@@ -7909,7 +7909,7 @@ pub fn _mm512_mask_fmsubadd_round_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmsubadd, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask3_fmsubadd_round_ph<const ROUNDING: i32>(
a: __m512h,
b: __m512h,
@@ -7939,7 +7939,7 @@ pub fn _mm512_mask3_fmsubadd_round_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfmsubadd, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_fmsubadd_round_ph<const ROUNDING: i32>(
k: __mmask32,
a: __m512h,
@@ -7963,7 +7963,7 @@ pub fn _mm512_maskz_fmsubadd_round_ph<const ROUNDING: i32>(
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vrcpph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_rcp_ph(a: __m128h) -> __m128h {
_mm_mask_rcp_ph(_mm_undefined_ph(), 0xff, a)
}
@@ -7976,7 +7976,7 @@ pub fn _mm_rcp_ph(a: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vrcpph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_rcp_ph(src: __m128h, k: __mmask8, a: __m128h) -> __m128h {
unsafe { vrcpph_128(a, src, k) }
}
@@ -7989,7 +7989,7 @@ pub fn _mm_mask_rcp_ph(src: __m128h, k: __mmask8, a: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vrcpph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_rcp_ph(k: __mmask8, a: __m128h) -> __m128h {
_mm_mask_rcp_ph(_mm_setzero_ph(), k, a)
}
@@ -8001,7 +8001,7 @@ pub fn _mm_maskz_rcp_ph(k: __mmask8, a: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vrcpph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_rcp_ph(a: __m256h) -> __m256h {
_mm256_mask_rcp_ph(_mm256_undefined_ph(), 0xffff, a)
}
@@ -8014,7 +8014,7 @@ pub fn _mm256_rcp_ph(a: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vrcpph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_rcp_ph(src: __m256h, k: __mmask16, a: __m256h) -> __m256h {
unsafe { vrcpph_256(a, src, k) }
}
@@ -8027,7 +8027,7 @@ pub fn _mm256_mask_rcp_ph(src: __m256h, k: __mmask16, a: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vrcpph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_rcp_ph(k: __mmask16, a: __m256h) -> __m256h {
_mm256_mask_rcp_ph(_mm256_setzero_ph(), k, a)
}
@@ -8039,7 +8039,7 @@ pub fn _mm256_maskz_rcp_ph(k: __mmask16, a: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vrcpph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_rcp_ph(a: __m512h) -> __m512h {
_mm512_mask_rcp_ph(_mm512_undefined_ph(), 0xffffffff, a)
}
@@ -8052,7 +8052,7 @@ pub fn _mm512_rcp_ph(a: __m512h) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vrcpph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_rcp_ph(src: __m512h, k: __mmask32, a: __m512h) -> __m512h {
unsafe { vrcpph_512(a, src, k) }
}
@@ -8065,7 +8065,7 @@ pub fn _mm512_mask_rcp_ph(src: __m512h, k: __mmask32, a: __m512h) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vrcpph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_rcp_ph(k: __mmask32, a: __m512h) -> __m512h {
_mm512_mask_rcp_ph(_mm512_setzero_ph(), k, a)
}
@@ -8079,7 +8079,7 @@ pub fn _mm512_maskz_rcp_ph(k: __mmask32, a: __m512h) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vrcpsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_rcp_sh(a: __m128h, b: __m128h) -> __m128h {
_mm_mask_rcp_sh(f16x8::ZERO.as_m128h(), 0xff, a, b)
}
@@ -8093,7 +8093,7 @@ pub fn _mm_rcp_sh(a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vrcpsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_rcp_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
unsafe { vrcpsh(a, b, src, k) }
}
@@ -8107,7 +8107,7 @@ pub fn _mm_mask_rcp_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vrcpsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_rcp_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
_mm_mask_rcp_sh(f16x8::ZERO.as_m128h(), k, a, b)
}
@@ -8120,7 +8120,7 @@ pub fn _mm_maskz_rcp_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vrsqrtph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_rsqrt_ph(a: __m128h) -> __m128h {
_mm_mask_rsqrt_ph(_mm_undefined_ph(), 0xff, a)
}
@@ -8134,7 +8134,7 @@ pub fn _mm_rsqrt_ph(a: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vrsqrtph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_rsqrt_ph(src: __m128h, k: __mmask8, a: __m128h) -> __m128h {
unsafe { vrsqrtph_128(a, src, k) }
}
@@ -8148,7 +8148,7 @@ pub fn _mm_mask_rsqrt_ph(src: __m128h, k: __mmask8, a: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vrsqrtph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_rsqrt_ph(k: __mmask8, a: __m128h) -> __m128h {
_mm_mask_rsqrt_ph(_mm_setzero_ph(), k, a)
}
@@ -8161,7 +8161,7 @@ pub fn _mm_maskz_rsqrt_ph(k: __mmask8, a: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vrsqrtph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_rsqrt_ph(a: __m256h) -> __m256h {
_mm256_mask_rsqrt_ph(_mm256_undefined_ph(), 0xffff, a)
}
@@ -8175,7 +8175,7 @@ pub fn _mm256_rsqrt_ph(a: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vrsqrtph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_rsqrt_ph(src: __m256h, k: __mmask16, a: __m256h) -> __m256h {
unsafe { vrsqrtph_256(a, src, k) }
}
@@ -8189,7 +8189,7 @@ pub fn _mm256_mask_rsqrt_ph(src: __m256h, k: __mmask16, a: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vrsqrtph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_rsqrt_ph(k: __mmask16, a: __m256h) -> __m256h {
_mm256_mask_rsqrt_ph(_mm256_setzero_ph(), k, a)
}
@@ -8202,7 +8202,7 @@ pub fn _mm256_maskz_rsqrt_ph(k: __mmask16, a: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vrsqrtph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_rsqrt_ph(a: __m512h) -> __m512h {
_mm512_mask_rsqrt_ph(_mm512_undefined_ph(), 0xffffffff, a)
}
@@ -8216,7 +8216,7 @@ pub fn _mm512_rsqrt_ph(a: __m512h) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vrsqrtph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_rsqrt_ph(src: __m512h, k: __mmask32, a: __m512h) -> __m512h {
unsafe { vrsqrtph_512(a, src, k) }
}
@@ -8230,7 +8230,7 @@ pub fn _mm512_mask_rsqrt_ph(src: __m512h, k: __mmask32, a: __m512h) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vrsqrtph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_rsqrt_ph(k: __mmask32, a: __m512h) -> __m512h {
_mm512_mask_rsqrt_ph(_mm512_setzero_ph(), k, a)
}
@@ -8244,7 +8244,7 @@ pub fn _mm512_maskz_rsqrt_ph(k: __mmask32, a: __m512h) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vrsqrtsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_rsqrt_sh(a: __m128h, b: __m128h) -> __m128h {
_mm_mask_rsqrt_sh(f16x8::ZERO.as_m128h(), 0xff, a, b)
}
@@ -8258,7 +8258,7 @@ pub fn _mm_rsqrt_sh(a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vrsqrtsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_rsqrt_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
unsafe { vrsqrtsh(a, b, src, k) }
}
@@ -8272,7 +8272,7 @@ pub fn _mm_mask_rsqrt_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> _
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vrsqrtsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_rsqrt_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
_mm_mask_rsqrt_sh(f16x8::ZERO.as_m128h(), k, a, b)
}
@@ -8284,7 +8284,7 @@ pub fn _mm_maskz_rsqrt_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vsqrtph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_sqrt_ph(a: __m128h) -> __m128h {
unsafe { simd_fsqrt(a) }
}
@@ -8296,7 +8296,7 @@ pub fn _mm_sqrt_ph(a: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vsqrtph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_sqrt_ph(src: __m128h, k: __mmask8, a: __m128h) -> __m128h {
unsafe { simd_select_bitmask(k, _mm_sqrt_ph(a), src) }
}
@@ -8308,7 +8308,7 @@ pub fn _mm_mask_sqrt_ph(src: __m128h, k: __mmask8, a: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vsqrtph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_sqrt_ph(k: __mmask8, a: __m128h) -> __m128h {
unsafe { simd_select_bitmask(k, _mm_sqrt_ph(a), _mm_setzero_ph()) }
}
@@ -8320,7 +8320,7 @@ pub fn _mm_maskz_sqrt_ph(k: __mmask8, a: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vsqrtph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_sqrt_ph(a: __m256h) -> __m256h {
unsafe { simd_fsqrt(a) }
}
@@ -8332,7 +8332,7 @@ pub fn _mm256_sqrt_ph(a: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vsqrtph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_sqrt_ph(src: __m256h, k: __mmask16, a: __m256h) -> __m256h {
unsafe { simd_select_bitmask(k, _mm256_sqrt_ph(a), src) }
}
@@ -8344,7 +8344,7 @@ pub fn _mm256_mask_sqrt_ph(src: __m256h, k: __mmask16, a: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vsqrtph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_sqrt_ph(k: __mmask16, a: __m256h) -> __m256h {
unsafe { simd_select_bitmask(k, _mm256_sqrt_ph(a), _mm256_setzero_ph()) }
}
@@ -8356,7 +8356,7 @@ pub fn _mm256_maskz_sqrt_ph(k: __mmask16, a: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vsqrtph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_sqrt_ph(a: __m512h) -> __m512h {
unsafe { simd_fsqrt(a) }
}
@@ -8368,7 +8368,7 @@ pub fn _mm512_sqrt_ph(a: __m512h) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vsqrtph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_sqrt_ph(src: __m512h, k: __mmask32, a: __m512h) -> __m512h {
unsafe { simd_select_bitmask(k, _mm512_sqrt_ph(a), src) }
}
@@ -8380,7 +8380,7 @@ pub fn _mm512_mask_sqrt_ph(src: __m512h, k: __mmask32, a: __m512h) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vsqrtph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_sqrt_ph(k: __mmask32, a: __m512h) -> __m512h {
unsafe { simd_select_bitmask(k, _mm512_sqrt_ph(a), _mm512_setzero_ph()) }
}
@@ -8400,7 +8400,7 @@ pub fn _mm512_maskz_sqrt_ph(k: __mmask32, a: __m512h) -> __m512h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vsqrtph, ROUNDING = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_sqrt_round_ph<const ROUNDING: i32>(a: __m512h) -> __m512h {
unsafe {
static_assert_rounding!(ROUNDING);
@@ -8423,7 +8423,7 @@ pub fn _mm512_sqrt_round_ph<const ROUNDING: i32>(a: __m512h) -> __m512h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vsqrtph, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_sqrt_round_ph<const ROUNDING: i32>(
src: __m512h,
k: __mmask32,
@@ -8450,7 +8450,7 @@ pub fn _mm512_mask_sqrt_round_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vsqrtph, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_sqrt_round_ph<const ROUNDING: i32>(k: __mmask32, a: __m512h) -> __m512h {
unsafe {
static_assert_rounding!(ROUNDING);
@@ -8466,7 +8466,7 @@ pub fn _mm512_maskz_sqrt_round_ph<const ROUNDING: i32>(k: __mmask32, a: __m512h)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vsqrtsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_sqrt_sh(a: __m128h, b: __m128h) -> __m128h {
_mm_mask_sqrt_sh(f16x8::ZERO.as_m128h(), 0xff, a, b)
}
@@ -8479,7 +8479,7 @@ pub fn _mm_sqrt_sh(a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vsqrtsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_sqrt_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
_mm_mask_sqrt_round_sh::<_MM_FROUND_CUR_DIRECTION>(src, k, a, b)
}
@@ -8492,7 +8492,7 @@ pub fn _mm_mask_sqrt_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vsqrtsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_sqrt_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
_mm_mask_sqrt_sh(f16x8::ZERO.as_m128h(), k, a, b)
}
@@ -8513,7 +8513,7 @@ pub fn _mm_maskz_sqrt_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vsqrtsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_sqrt_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m128h {
static_assert_rounding!(ROUNDING);
_mm_mask_sqrt_round_sh::<ROUNDING>(f16x8::ZERO.as_m128h(), 0xff, a, b)
@@ -8535,7 +8535,7 @@ pub fn _mm_sqrt_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m128h
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vsqrtsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_sqrt_round_sh<const ROUNDING: i32>(
src: __m128h,
k: __mmask8,
@@ -8564,7 +8564,7 @@ pub fn _mm_mask_sqrt_round_sh<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vsqrtsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_sqrt_round_sh<const ROUNDING: i32>(
k: __mmask8,
a: __m128h,
@@ -8582,7 +8582,7 @@ pub fn _mm_maskz_sqrt_round_sh<const ROUNDING: i32>(
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vmaxph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_max_ph(a: __m128h, b: __m128h) -> __m128h {
unsafe { vmaxph_128(a, b) }
}
@@ -8596,7 +8596,7 @@ pub fn _mm_max_ph(a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vmaxph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_max_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
unsafe { simd_select_bitmask(k, _mm_max_ph(a, b), src) }
}
@@ -8610,7 +8610,7 @@ pub fn _mm_mask_max_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vmaxph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_max_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
unsafe { simd_select_bitmask(k, _mm_max_ph(a, b), _mm_setzero_ph()) }
}
@@ -8623,7 +8623,7 @@ pub fn _mm_maskz_max_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vmaxph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_max_ph(a: __m256h, b: __m256h) -> __m256h {
unsafe { vmaxph_256(a, b) }
}
@@ -8637,7 +8637,7 @@ pub fn _mm256_max_ph(a: __m256h, b: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vmaxph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_max_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m256h) -> __m256h {
unsafe { simd_select_bitmask(k, _mm256_max_ph(a, b), src) }
}
@@ -8651,7 +8651,7 @@ pub fn _mm256_mask_max_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m256h) ->
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vmaxph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_max_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h {
unsafe { simd_select_bitmask(k, _mm256_max_ph(a, b), _mm256_setzero_ph()) }
}
@@ -8664,7 +8664,7 @@ pub fn _mm256_maskz_max_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vmaxph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_max_ph(a: __m512h, b: __m512h) -> __m512h {
_mm512_max_round_ph::<_MM_FROUND_CUR_DIRECTION>(a, b)
}
@@ -8678,7 +8678,7 @@ pub fn _mm512_max_ph(a: __m512h, b: __m512h) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vmaxph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_max_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
unsafe { simd_select_bitmask(k, _mm512_max_ph(a, b), src) }
}
@@ -8692,7 +8692,7 @@ pub fn _mm512_mask_max_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m512h) ->
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vmaxph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_max_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
unsafe { simd_select_bitmask(k, _mm512_max_ph(a, b), _mm512_setzero_ph()) }
}
@@ -8707,7 +8707,7 @@ pub fn _mm512_maskz_max_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vmaxph, SAE = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_max_round_ph<const SAE: i32>(a: __m512h, b: __m512h) -> __m512h {
unsafe {
static_assert_sae!(SAE);
@@ -8725,7 +8725,7 @@ pub fn _mm512_max_round_ph<const SAE: i32>(a: __m512h, b: __m512h) -> __m512h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vmaxph, SAE = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_max_round_ph<const SAE: i32>(
src: __m512h,
k: __mmask32,
@@ -8748,7 +8748,7 @@ pub fn _mm512_mask_max_round_ph<const SAE: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vmaxph, SAE = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_max_round_ph<const SAE: i32>(k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
unsafe {
static_assert_sae!(SAE);
@@ -8765,7 +8765,7 @@ pub fn _mm512_maskz_max_round_ph<const SAE: i32>(k: __mmask32, a: __m512h, b: __
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vmaxsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_max_sh(a: __m128h, b: __m128h) -> __m128h {
_mm_mask_max_sh(_mm_undefined_ph(), 0xff, a, b)
}
@@ -8779,7 +8779,7 @@ pub fn _mm_max_sh(a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vmaxsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_max_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
_mm_mask_max_round_sh::<_MM_FROUND_CUR_DIRECTION>(src, k, a, b)
}
@@ -8793,7 +8793,7 @@ pub fn _mm_mask_max_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vmaxsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_max_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
_mm_mask_max_sh(f16x8::ZERO.as_m128h(), k, a, b)
}
@@ -8808,7 +8808,7 @@ pub fn _mm_maskz_max_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vmaxsh, SAE = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_max_round_sh<const SAE: i32>(a: __m128h, b: __m128h) -> __m128h {
static_assert_sae!(SAE);
_mm_mask_max_round_sh::<SAE>(_mm_undefined_ph(), 0xff, a, b)
@@ -8825,7 +8825,7 @@ pub fn _mm_max_round_sh<const SAE: i32>(a: __m128h, b: __m128h) -> __m128h {
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vmaxsh, SAE = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_max_round_sh<const SAE: i32>(
src: __m128h,
k: __mmask8,
@@ -8849,7 +8849,7 @@ pub fn _mm_mask_max_round_sh<const SAE: i32>(
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vmaxsh, SAE = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_max_round_sh<const SAE: i32>(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
static_assert_sae!(SAE);
_mm_mask_max_round_sh::<SAE>(f16x8::ZERO.as_m128h(), k, a, b)
@@ -8863,7 +8863,7 @@ pub fn _mm_maskz_max_round_sh<const SAE: i32>(k: __mmask8, a: __m128h, b: __m128
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vminph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_min_ph(a: __m128h, b: __m128h) -> __m128h {
unsafe { vminph_128(a, b) }
}
@@ -8877,7 +8877,7 @@ pub fn _mm_min_ph(a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vminph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_min_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
unsafe { simd_select_bitmask(k, _mm_min_ph(a, b), src) }
}
@@ -8891,7 +8891,7 @@ pub fn _mm_mask_min_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vminph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_min_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
unsafe { simd_select_bitmask(k, _mm_min_ph(a, b), _mm_setzero_ph()) }
}
@@ -8904,7 +8904,7 @@ pub fn _mm_maskz_min_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vminph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_min_ph(a: __m256h, b: __m256h) -> __m256h {
unsafe { vminph_256(a, b) }
}
@@ -8918,7 +8918,7 @@ pub fn _mm256_min_ph(a: __m256h, b: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vminph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_min_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m256h) -> __m256h {
unsafe { simd_select_bitmask(k, _mm256_min_ph(a, b), src) }
}
@@ -8932,7 +8932,7 @@ pub fn _mm256_mask_min_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m256h) ->
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vminph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_min_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h {
unsafe { simd_select_bitmask(k, _mm256_min_ph(a, b), _mm256_setzero_ph()) }
}
@@ -8945,7 +8945,7 @@ pub fn _mm256_maskz_min_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vminph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_min_ph(a: __m512h, b: __m512h) -> __m512h {
_mm512_min_round_ph::<_MM_FROUND_CUR_DIRECTION>(a, b)
}
@@ -8959,7 +8959,7 @@ pub fn _mm512_min_ph(a: __m512h, b: __m512h) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vminph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_min_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
unsafe { simd_select_bitmask(k, _mm512_min_ph(a, b), src) }
}
@@ -8973,7 +8973,7 @@ pub fn _mm512_mask_min_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m512h) ->
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vminph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_min_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
unsafe { simd_select_bitmask(k, _mm512_min_ph(a, b), _mm512_setzero_ph()) }
}
@@ -8987,7 +8987,7 @@ pub fn _mm512_maskz_min_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vminph, SAE = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_min_round_ph<const SAE: i32>(a: __m512h, b: __m512h) -> __m512h {
unsafe {
static_assert_sae!(SAE);
@@ -9005,7 +9005,7 @@ pub fn _mm512_min_round_ph<const SAE: i32>(a: __m512h, b: __m512h) -> __m512h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vminph, SAE = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_min_round_ph<const SAE: i32>(
src: __m512h,
k: __mmask32,
@@ -9028,7 +9028,7 @@ pub fn _mm512_mask_min_round_ph<const SAE: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vminph, SAE = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_min_round_ph<const SAE: i32>(k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
unsafe {
static_assert_sae!(SAE);
@@ -9045,7 +9045,7 @@ pub fn _mm512_maskz_min_round_ph<const SAE: i32>(k: __mmask32, a: __m512h, b: __
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vminsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_min_sh(a: __m128h, b: __m128h) -> __m128h {
_mm_mask_min_sh(_mm_undefined_ph(), 0xff, a, b)
}
@@ -9059,7 +9059,7 @@ pub fn _mm_min_sh(a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vminsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_min_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
_mm_mask_min_round_sh::<_MM_FROUND_CUR_DIRECTION>(src, k, a, b)
}
@@ -9073,7 +9073,7 @@ pub fn _mm_mask_min_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vminsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_min_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
_mm_mask_min_sh(f16x8::ZERO.as_m128h(), k, a, b)
}
@@ -9088,7 +9088,7 @@ pub fn _mm_maskz_min_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vminsh, SAE = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_min_round_sh<const SAE: i32>(a: __m128h, b: __m128h) -> __m128h {
static_assert_sae!(SAE);
_mm_mask_min_round_sh::<SAE>(_mm_undefined_ph(), 0xff, a, b)
@@ -9105,7 +9105,7 @@ pub fn _mm_min_round_sh<const SAE: i32>(a: __m128h, b: __m128h) -> __m128h {
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vminsh, SAE = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_min_round_sh<const SAE: i32>(
src: __m128h,
k: __mmask8,
@@ -9129,7 +9129,7 @@ pub fn _mm_mask_min_round_sh<const SAE: i32>(
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vminsh, SAE = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_min_round_sh<const SAE: i32>(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
static_assert_sae!(SAE);
_mm_mask_min_round_sh::<SAE>(f16x8::ZERO.as_m128h(), k, a, b)
@@ -9143,7 +9143,7 @@ pub fn _mm_maskz_min_round_sh<const SAE: i32>(k: __mmask8, a: __m128h, b: __m128
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vgetexpph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_getexp_ph(a: __m128h) -> __m128h {
_mm_mask_getexp_ph(_mm_undefined_ph(), 0xff, a)
}
@@ -9157,7 +9157,7 @@ pub fn _mm_getexp_ph(a: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vgetexpph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_getexp_ph(src: __m128h, k: __mmask8, a: __m128h) -> __m128h {
unsafe { vgetexpph_128(a, src, k) }
}
@@ -9171,7 +9171,7 @@ pub fn _mm_mask_getexp_ph(src: __m128h, k: __mmask8, a: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vgetexpph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_getexp_ph(k: __mmask8, a: __m128h) -> __m128h {
_mm_mask_getexp_ph(_mm_setzero_ph(), k, a)
}
@@ -9184,7 +9184,7 @@ pub fn _mm_maskz_getexp_ph(k: __mmask8, a: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vgetexpph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_getexp_ph(a: __m256h) -> __m256h {
_mm256_mask_getexp_ph(_mm256_undefined_ph(), 0xffff, a)
}
@@ -9198,7 +9198,7 @@ pub fn _mm256_getexp_ph(a: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vgetexpph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_getexp_ph(src: __m256h, k: __mmask16, a: __m256h) -> __m256h {
unsafe { vgetexpph_256(a, src, k) }
}
@@ -9212,7 +9212,7 @@ pub fn _mm256_mask_getexp_ph(src: __m256h, k: __mmask16, a: __m256h) -> __m256h
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vgetexpph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_getexp_ph(k: __mmask16, a: __m256h) -> __m256h {
_mm256_mask_getexp_ph(_mm256_setzero_ph(), k, a)
}
@@ -9225,7 +9225,7 @@ pub fn _mm256_maskz_getexp_ph(k: __mmask16, a: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vgetexpph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_getexp_ph(a: __m512h) -> __m512h {
_mm512_mask_getexp_ph(_mm512_undefined_ph(), 0xffffffff, a)
}
@@ -9239,7 +9239,7 @@ pub fn _mm512_getexp_ph(a: __m512h) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vgetexpph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_getexp_ph(src: __m512h, k: __mmask32, a: __m512h) -> __m512h {
_mm512_mask_getexp_round_ph::<_MM_FROUND_CUR_DIRECTION>(src, k, a)
}
@@ -9253,7 +9253,7 @@ pub fn _mm512_mask_getexp_ph(src: __m512h, k: __mmask32, a: __m512h) -> __m512h
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vgetexpph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_getexp_ph(k: __mmask32, a: __m512h) -> __m512h {
_mm512_mask_getexp_ph(_mm512_setzero_ph(), k, a)
}
@@ -9268,7 +9268,7 @@ pub fn _mm512_maskz_getexp_ph(k: __mmask32, a: __m512h) -> __m512h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vgetexpph, SAE = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_getexp_round_ph<const SAE: i32>(a: __m512h) -> __m512h {
static_assert_sae!(SAE);
_mm512_mask_getexp_round_ph::<SAE>(_mm512_undefined_ph(), 0xffffffff, a)
@@ -9284,7 +9284,7 @@ pub fn _mm512_getexp_round_ph<const SAE: i32>(a: __m512h) -> __m512h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vgetexpph, SAE = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_getexp_round_ph<const SAE: i32>(
src: __m512h,
k: __mmask32,
@@ -9306,7 +9306,7 @@ pub fn _mm512_mask_getexp_round_ph<const SAE: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vgetexpph, SAE = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_getexp_round_ph<const SAE: i32>(k: __mmask32, a: __m512h) -> __m512h {
static_assert_sae!(SAE);
_mm512_mask_getexp_round_ph::<SAE>(_mm512_setzero_ph(), k, a)
@@ -9321,7 +9321,7 @@ pub fn _mm512_maskz_getexp_round_ph<const SAE: i32>(k: __mmask32, a: __m512h) ->
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vgetexpsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_getexp_sh(a: __m128h, b: __m128h) -> __m128h {
_mm_mask_getexp_sh(f16x8::ZERO.as_m128h(), 0xff, a, b)
}
@@ -9336,7 +9336,7 @@ pub fn _mm_getexp_sh(a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vgetexpsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_getexp_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
_mm_mask_getexp_round_sh::<_MM_FROUND_CUR_DIRECTION>(src, k, a, b)
}
@@ -9351,7 +9351,7 @@ pub fn _mm_mask_getexp_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) ->
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vgetexpsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_getexp_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
_mm_mask_getexp_sh(f16x8::ZERO.as_m128h(), k, a, b)
}
@@ -9367,7 +9367,7 @@ pub fn _mm_maskz_getexp_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vgetexpsh, SAE = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_getexp_round_sh<const SAE: i32>(a: __m128h, b: __m128h) -> __m128h {
static_assert_sae!(SAE);
_mm_mask_getexp_round_sh::<SAE>(f16x8::ZERO.as_m128h(), 0xff, a, b)
@@ -9384,7 +9384,7 @@ pub fn _mm_getexp_round_sh<const SAE: i32>(a: __m128h, b: __m128h) -> __m128h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vgetexpsh, SAE = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_getexp_round_sh<const SAE: i32>(
src: __m128h,
k: __mmask8,
@@ -9408,7 +9408,7 @@ pub fn _mm_mask_getexp_round_sh<const SAE: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vgetexpsh, SAE = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_getexp_round_sh<const SAE: i32>(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
static_assert_sae!(SAE);
_mm_mask_getexp_round_sh::<SAE>(f16x8::ZERO.as_m128h(), k, a, b)
@@ -9436,7 +9436,7 @@ pub fn _mm_maskz_getexp_round_sh<const SAE: i32>(k: __mmask8, a: __m128h, b: __m
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vgetmantph, NORM = 0, SIGN = 0))]
#[rustc_legacy_const_generics(1, 2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_getmant_ph<const NORM: _MM_MANTISSA_NORM_ENUM, const SIGN: _MM_MANTISSA_SIGN_ENUM>(
a: __m128h,
) -> __m128h {
@@ -9468,7 +9468,7 @@ pub fn _mm_getmant_ph<const NORM: _MM_MANTISSA_NORM_ENUM, const SIGN: _MM_MANTIS
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vgetmantph, NORM = 0, SIGN = 0))]
#[rustc_legacy_const_generics(3, 4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_getmant_ph<
const NORM: _MM_MANTISSA_NORM_ENUM,
const SIGN: _MM_MANTISSA_SIGN_ENUM,
@@ -9507,7 +9507,7 @@ pub fn _mm_mask_getmant_ph<
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vgetmantph, NORM = 0, SIGN = 0))]
#[rustc_legacy_const_generics(2, 3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_getmant_ph<
const NORM: _MM_MANTISSA_NORM_ENUM,
const SIGN: _MM_MANTISSA_SIGN_ENUM,
@@ -9542,7 +9542,7 @@ pub fn _mm_maskz_getmant_ph<
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vgetmantph, NORM = 0, SIGN = 0))]
#[rustc_legacy_const_generics(1, 2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_getmant_ph<const NORM: _MM_MANTISSA_NORM_ENUM, const SIGN: _MM_MANTISSA_SIGN_ENUM>(
a: __m256h,
) -> __m256h {
@@ -9574,7 +9574,7 @@ pub fn _mm256_getmant_ph<const NORM: _MM_MANTISSA_NORM_ENUM, const SIGN: _MM_MAN
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vgetmantph, NORM = 0, SIGN = 0))]
#[rustc_legacy_const_generics(3, 4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_getmant_ph<
const NORM: _MM_MANTISSA_NORM_ENUM,
const SIGN: _MM_MANTISSA_SIGN_ENUM,
@@ -9613,7 +9613,7 @@ pub fn _mm256_mask_getmant_ph<
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vgetmantph, NORM = 0, SIGN = 0))]
#[rustc_legacy_const_generics(2, 3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_getmant_ph<
const NORM: _MM_MANTISSA_NORM_ENUM,
const SIGN: _MM_MANTISSA_SIGN_ENUM,
@@ -9648,7 +9648,7 @@ pub fn _mm256_maskz_getmant_ph<
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vgetmantph, NORM = 0, SIGN = 0))]
#[rustc_legacy_const_generics(1, 2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_getmant_ph<const NORM: _MM_MANTISSA_NORM_ENUM, const SIGN: _MM_MANTISSA_SIGN_ENUM>(
a: __m512h,
) -> __m512h {
@@ -9680,7 +9680,7 @@ pub fn _mm512_getmant_ph<const NORM: _MM_MANTISSA_NORM_ENUM, const SIGN: _MM_MAN
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vgetmantph, NORM = 0, SIGN = 0))]
#[rustc_legacy_const_generics(3, 4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_getmant_ph<
const NORM: _MM_MANTISSA_NORM_ENUM,
const SIGN: _MM_MANTISSA_SIGN_ENUM,
@@ -9717,7 +9717,7 @@ pub fn _mm512_mask_getmant_ph<
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vgetmantph, NORM = 0, SIGN = 0))]
#[rustc_legacy_const_generics(2, 3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_getmant_ph<
const NORM: _MM_MANTISSA_NORM_ENUM,
const SIGN: _MM_MANTISSA_SIGN_ENUM,
@@ -9755,7 +9755,7 @@ pub fn _mm512_maskz_getmant_ph<
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vgetmantph, NORM = 0, SIGN = 0, SAE = 8))]
#[rustc_legacy_const_generics(1, 2, 3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_getmant_round_ph<
const NORM: _MM_MANTISSA_NORM_ENUM,
const SIGN: _MM_MANTISSA_SIGN_ENUM,
@@ -9795,7 +9795,7 @@ pub fn _mm512_getmant_round_ph<
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vgetmantph, NORM = 0, SIGN = 0, SAE = 8))]
#[rustc_legacy_const_generics(3, 4, 5)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_getmant_round_ph<
const NORM: _MM_MANTISSA_NORM_ENUM,
const SIGN: _MM_MANTISSA_SIGN_ENUM,
@@ -9839,7 +9839,7 @@ pub fn _mm512_mask_getmant_round_ph<
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vgetmantph, NORM = 0, SIGN = 0, SAE = 8))]
#[rustc_legacy_const_generics(2, 3, 4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_getmant_round_ph<
const NORM: _MM_MANTISSA_NORM_ENUM,
const SIGN: _MM_MANTISSA_SIGN_ENUM,
@@ -9877,7 +9877,7 @@ pub fn _mm512_maskz_getmant_round_ph<
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vgetmantsh, NORM = 0, SIGN = 0))]
#[rustc_legacy_const_generics(2, 3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_getmant_sh<const NORM: _MM_MANTISSA_NORM_ENUM, const SIGN: _MM_MANTISSA_SIGN_ENUM>(
a: __m128h,
b: __m128h,
@@ -9911,7 +9911,7 @@ pub fn _mm_getmant_sh<const NORM: _MM_MANTISSA_NORM_ENUM, const SIGN: _MM_MANTIS
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vgetmantsh, NORM = 0, SIGN = 0))]
#[rustc_legacy_const_generics(4, 5)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_getmant_sh<
const NORM: _MM_MANTISSA_NORM_ENUM,
const SIGN: _MM_MANTISSA_SIGN_ENUM,
@@ -9950,7 +9950,7 @@ pub fn _mm_mask_getmant_sh<
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vgetmantsh, NORM = 0, SIGN = 0))]
#[rustc_legacy_const_generics(3, 4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_getmant_sh<
const NORM: _MM_MANTISSA_NORM_ENUM,
const SIGN: _MM_MANTISSA_SIGN_ENUM,
@@ -9990,7 +9990,7 @@ pub fn _mm_maskz_getmant_sh<
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vgetmantsh, NORM = 0, SIGN = 0, SAE = 8))]
#[rustc_legacy_const_generics(2, 3, 4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_getmant_round_sh<
const NORM: _MM_MANTISSA_NORM_ENUM,
const SIGN: _MM_MANTISSA_SIGN_ENUM,
@@ -10031,7 +10031,7 @@ pub fn _mm_getmant_round_sh<
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vgetmantsh, NORM = 0, SIGN = 0, SAE = 8))]
#[rustc_legacy_const_generics(4, 5, 6)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_getmant_round_sh<
const NORM: _MM_MANTISSA_NORM_ENUM,
const SIGN: _MM_MANTISSA_SIGN_ENUM,
@@ -10076,7 +10076,7 @@ pub fn _mm_mask_getmant_round_sh<
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vgetmantsh, NORM = 0, SIGN = 0, SAE = 8))]
#[rustc_legacy_const_generics(3, 4, 5)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_getmant_round_sh<
const NORM: _MM_MANTISSA_NORM_ENUM,
const SIGN: _MM_MANTISSA_SIGN_ENUM,
@@ -10108,7 +10108,7 @@ pub fn _mm_maskz_getmant_round_sh<
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_roundscale_ph<const IMM8: i32>(a: __m128h) -> __m128h {
static_assert_uimm_bits!(IMM8, 8);
_mm_mask_roundscale_ph::<IMM8>(_mm_undefined_ph(), 0xff, a)
@@ -10131,7 +10131,7 @@ pub fn _mm_roundscale_ph<const IMM8: i32>(a: __m128h) -> __m128h {
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_roundscale_ph<const IMM8: i32>(src: __m128h, k: __mmask8, a: __m128h) -> __m128h {
unsafe {
static_assert_uimm_bits!(IMM8, 8);
@@ -10156,7 +10156,7 @@ pub fn _mm_mask_roundscale_ph<const IMM8: i32>(src: __m128h, k: __mmask8, a: __m
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_roundscale_ph<const IMM8: i32>(k: __mmask8, a: __m128h) -> __m128h {
static_assert_uimm_bits!(IMM8, 8);
_mm_mask_roundscale_ph::<IMM8>(_mm_setzero_ph(), k, a)
@@ -10178,7 +10178,7 @@ pub fn _mm_maskz_roundscale_ph<const IMM8: i32>(k: __mmask8, a: __m128h) -> __m1
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_roundscale_ph<const IMM8: i32>(a: __m256h) -> __m256h {
static_assert_uimm_bits!(IMM8, 8);
_mm256_mask_roundscale_ph::<IMM8>(_mm256_undefined_ph(), 0xffff, a)
@@ -10201,7 +10201,7 @@ pub fn _mm256_roundscale_ph<const IMM8: i32>(a: __m256h) -> __m256h {
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_roundscale_ph<const IMM8: i32>(
src: __m256h,
k: __mmask16,
@@ -10230,7 +10230,7 @@ pub fn _mm256_mask_roundscale_ph<const IMM8: i32>(
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_roundscale_ph<const IMM8: i32>(k: __mmask16, a: __m256h) -> __m256h {
static_assert_uimm_bits!(IMM8, 8);
_mm256_mask_roundscale_ph::<IMM8>(_mm256_setzero_ph(), k, a)
@@ -10252,7 +10252,7 @@ pub fn _mm256_maskz_roundscale_ph<const IMM8: i32>(k: __mmask16, a: __m256h) ->
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_roundscale_ph<const IMM8: i32>(a: __m512h) -> __m512h {
static_assert_uimm_bits!(IMM8, 8);
_mm512_mask_roundscale_ph::<IMM8>(_mm512_undefined_ph(), 0xffffffff, a)
@@ -10275,7 +10275,7 @@ pub fn _mm512_roundscale_ph<const IMM8: i32>(a: __m512h) -> __m512h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_roundscale_ph<const IMM8: i32>(
src: __m512h,
k: __mmask32,
@@ -10302,7 +10302,7 @@ pub fn _mm512_mask_roundscale_ph<const IMM8: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_roundscale_ph<const IMM8: i32>(k: __mmask32, a: __m512h) -> __m512h {
static_assert_uimm_bits!(IMM8, 8);
_mm512_mask_roundscale_ph::<IMM8>(_mm512_setzero_ph(), k, a)
@@ -10325,7 +10325,7 @@ pub fn _mm512_maskz_roundscale_ph<const IMM8: i32>(k: __mmask32, a: __m512h) ->
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0, SAE = 8))]
#[rustc_legacy_const_generics(1, 2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_roundscale_round_ph<const IMM8: i32, const SAE: i32>(a: __m512h) -> __m512h {
static_assert_uimm_bits!(IMM8, 8);
static_assert_sae!(SAE);
@@ -10350,7 +10350,7 @@ pub fn _mm512_roundscale_round_ph<const IMM8: i32, const SAE: i32>(a: __m512h) -
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0, SAE = 8))]
#[rustc_legacy_const_generics(3, 4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_roundscale_round_ph<const IMM8: i32, const SAE: i32>(
src: __m512h,
k: __mmask32,
@@ -10380,7 +10380,7 @@ pub fn _mm512_mask_roundscale_round_ph<const IMM8: i32, const SAE: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0, SAE = 8))]
#[rustc_legacy_const_generics(2, 3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_roundscale_round_ph<const IMM8: i32, const SAE: i32>(
k: __mmask32,
a: __m512h,
@@ -10407,7 +10407,7 @@ pub fn _mm512_maskz_roundscale_round_ph<const IMM8: i32, const SAE: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vrndscalesh, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_roundscale_sh<const IMM8: i32>(a: __m128h, b: __m128h) -> __m128h {
static_assert_uimm_bits!(IMM8, 8);
_mm_mask_roundscale_sh::<IMM8>(f16x8::ZERO.as_m128h(), 0xff, a, b)
@@ -10430,7 +10430,7 @@ pub fn _mm_roundscale_sh<const IMM8: i32>(a: __m128h, b: __m128h) -> __m128h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vrndscalesh, IMM8 = 0))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_roundscale_sh<const IMM8: i32>(
src: __m128h,
k: __mmask8,
@@ -10458,7 +10458,7 @@ pub fn _mm_mask_roundscale_sh<const IMM8: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vrndscalesh, IMM8 = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_roundscale_sh<const IMM8: i32>(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
static_assert_uimm_bits!(IMM8, 8);
_mm_mask_roundscale_sh::<IMM8>(f16x8::ZERO.as_m128h(), k, a, b)
@@ -10483,7 +10483,7 @@ pub fn _mm_maskz_roundscale_sh<const IMM8: i32>(k: __mmask8, a: __m128h, b: __m1
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vrndscalesh, IMM8 = 0, SAE = 8))]
#[rustc_legacy_const_generics(2, 3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_roundscale_round_sh<const IMM8: i32, const SAE: i32>(a: __m128h, b: __m128h) -> __m128h {
static_assert_uimm_bits!(IMM8, 8);
static_assert_sae!(SAE);
@@ -10509,7 +10509,7 @@ pub fn _mm_roundscale_round_sh<const IMM8: i32, const SAE: i32>(a: __m128h, b: _
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vrndscalesh, IMM8 = 0, SAE = 8))]
#[rustc_legacy_const_generics(4, 5)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_roundscale_round_sh<const IMM8: i32, const SAE: i32>(
src: __m128h,
k: __mmask8,
@@ -10542,7 +10542,7 @@ pub fn _mm_mask_roundscale_round_sh<const IMM8: i32, const SAE: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vrndscalesh, IMM8 = 0, SAE = 8))]
#[rustc_legacy_const_generics(3, 4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_roundscale_round_sh<const IMM8: i32, const SAE: i32>(
k: __mmask8,
a: __m128h,
@@ -10560,7 +10560,7 @@ pub fn _mm_maskz_roundscale_round_sh<const IMM8: i32, const SAE: i32>(
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vscalefph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_scalef_ph(a: __m128h, b: __m128h) -> __m128h {
_mm_mask_scalef_ph(_mm_undefined_ph(), 0xff, a, b)
}
@@ -10572,7 +10572,7 @@ pub fn _mm_scalef_ph(a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vscalefph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_scalef_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
unsafe { vscalefph_128(a, b, src, k) }
}
@@ -10584,7 +10584,7 @@ pub fn _mm_mask_scalef_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) ->
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vscalefph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_scalef_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
_mm_mask_scalef_ph(_mm_setzero_ph(), k, a, b)
}
@@ -10596,7 +10596,7 @@ pub fn _mm_maskz_scalef_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vscalefph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_scalef_ph(a: __m256h, b: __m256h) -> __m256h {
_mm256_mask_scalef_ph(_mm256_undefined_ph(), 0xffff, a, b)
}
@@ -10608,7 +10608,7 @@ pub fn _mm256_scalef_ph(a: __m256h, b: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vscalefph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_scalef_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m256h) -> __m256h {
unsafe { vscalefph_256(a, b, src, k) }
}
@@ -10620,7 +10620,7 @@ pub fn _mm256_mask_scalef_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m256h)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vscalefph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_scalef_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h {
_mm256_mask_scalef_ph(_mm256_setzero_ph(), k, a, b)
}
@@ -10632,7 +10632,7 @@ pub fn _mm256_maskz_scalef_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vscalefph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_scalef_ph(a: __m512h, b: __m512h) -> __m512h {
_mm512_mask_scalef_ph(_mm512_undefined_ph(), 0xffffffff, a, b)
}
@@ -10644,7 +10644,7 @@ pub fn _mm512_scalef_ph(a: __m512h, b: __m512h) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vscalefph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_scalef_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
_mm512_mask_scalef_round_ph::<_MM_FROUND_CUR_DIRECTION>(src, k, a, b)
}
@@ -10656,7 +10656,7 @@ pub fn _mm512_mask_scalef_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m512h)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vscalefph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_scalef_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
_mm512_mask_scalef_ph(_mm512_setzero_ph(), k, a, b)
}
@@ -10677,7 +10677,7 @@ pub fn _mm512_maskz_scalef_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vscalefph, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_scalef_round_ph<const ROUNDING: i32>(a: __m512h, b: __m512h) -> __m512h {
static_assert_rounding!(ROUNDING);
_mm512_mask_scalef_round_ph::<ROUNDING>(_mm512_undefined_ph(), 0xffffffff, a, b)
@@ -10699,7 +10699,7 @@ pub fn _mm512_scalef_round_ph<const ROUNDING: i32>(a: __m512h, b: __m512h) -> __
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vscalefph, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_scalef_round_ph<const ROUNDING: i32>(
src: __m512h,
k: __mmask32,
@@ -10728,7 +10728,7 @@ pub fn _mm512_mask_scalef_round_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vscalefph, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_scalef_round_ph<const ROUNDING: i32>(
k: __mmask32,
a: __m512h,
@@ -10746,7 +10746,7 @@ pub fn _mm512_maskz_scalef_round_ph<const ROUNDING: i32>(
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vscalefsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_scalef_sh(a: __m128h, b: __m128h) -> __m128h {
_mm_mask_scalef_sh(f16x8::ZERO.as_m128h(), 0xff, a, b)
}
@@ -10759,7 +10759,7 @@ pub fn _mm_scalef_sh(a: __m128h, b: __m128h) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vscalefsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_scalef_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
_mm_mask_scalef_round_sh::<_MM_FROUND_CUR_DIRECTION>(src, k, a, b)
}
@@ -10772,7 +10772,7 @@ pub fn _mm_mask_scalef_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) ->
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vscalefsh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_scalef_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
_mm_mask_scalef_sh(f16x8::ZERO.as_m128h(), k, a, b)
}
@@ -10794,7 +10794,7 @@ pub fn _mm_maskz_scalef_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vscalefsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_scalef_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m128h {
static_assert_rounding!(ROUNDING);
_mm_mask_scalef_round_sh::<ROUNDING>(f16x8::ZERO.as_m128h(), 0xff, a, b)
@@ -10817,7 +10817,7 @@ pub fn _mm_scalef_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m12
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vscalefsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_scalef_round_sh<const ROUNDING: i32>(
src: __m128h,
k: __mmask8,
@@ -10847,7 +10847,7 @@ pub fn _mm_mask_scalef_round_sh<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vscalefsh, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_scalef_round_sh<const ROUNDING: i32>(
k: __mmask8,
a: __m128h,
@@ -10873,7 +10873,7 @@ pub fn _mm_maskz_scalef_round_sh<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_reduce_ph<const IMM8: i32>(a: __m128h) -> __m128h {
static_assert_uimm_bits!(IMM8, 8);
_mm_mask_reduce_ph::<IMM8>(_mm_undefined_ph(), 0xff, a)
@@ -10896,7 +10896,7 @@ pub fn _mm_reduce_ph<const IMM8: i32>(a: __m128h) -> __m128h {
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_reduce_ph<const IMM8: i32>(src: __m128h, k: __mmask8, a: __m128h) -> __m128h {
unsafe {
static_assert_uimm_bits!(IMM8, 8);
@@ -10921,7 +10921,7 @@ pub fn _mm_mask_reduce_ph<const IMM8: i32>(src: __m128h, k: __mmask8, a: __m128h
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_reduce_ph<const IMM8: i32>(k: __mmask8, a: __m128h) -> __m128h {
static_assert_uimm_bits!(IMM8, 8);
_mm_mask_reduce_ph::<IMM8>(_mm_setzero_ph(), k, a)
@@ -10943,7 +10943,7 @@ pub fn _mm_maskz_reduce_ph<const IMM8: i32>(k: __mmask8, a: __m128h) -> __m128h
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_reduce_ph<const IMM8: i32>(a: __m256h) -> __m256h {
static_assert_uimm_bits!(IMM8, 8);
_mm256_mask_reduce_ph::<IMM8>(_mm256_undefined_ph(), 0xffff, a)
@@ -10966,7 +10966,7 @@ pub fn _mm256_reduce_ph<const IMM8: i32>(a: __m256h) -> __m256h {
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_reduce_ph<const IMM8: i32>(src: __m256h, k: __mmask16, a: __m256h) -> __m256h {
unsafe {
static_assert_uimm_bits!(IMM8, 8);
@@ -10991,7 +10991,7 @@ pub fn _mm256_mask_reduce_ph<const IMM8: i32>(src: __m256h, k: __mmask16, a: __m
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_reduce_ph<const IMM8: i32>(k: __mmask16, a: __m256h) -> __m256h {
static_assert_uimm_bits!(IMM8, 8);
_mm256_mask_reduce_ph::<IMM8>(_mm256_setzero_ph(), k, a)
@@ -11013,7 +11013,7 @@ pub fn _mm256_maskz_reduce_ph<const IMM8: i32>(k: __mmask16, a: __m256h) -> __m2
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_reduce_ph<const IMM8: i32>(a: __m512h) -> __m512h {
static_assert_uimm_bits!(IMM8, 8);
_mm512_mask_reduce_ph::<IMM8>(_mm512_undefined_ph(), 0xffffffff, a)
@@ -11036,7 +11036,7 @@ pub fn _mm512_reduce_ph<const IMM8: i32>(a: __m512h) -> __m512h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_reduce_ph<const IMM8: i32>(src: __m512h, k: __mmask32, a: __m512h) -> __m512h {
static_assert_uimm_bits!(IMM8, 8);
_mm512_mask_reduce_round_ph::<IMM8, _MM_FROUND_CUR_DIRECTION>(src, k, a)
@@ -11059,7 +11059,7 @@ pub fn _mm512_mask_reduce_ph<const IMM8: i32>(src: __m512h, k: __mmask32, a: __m
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_reduce_ph<const IMM8: i32>(k: __mmask32, a: __m512h) -> __m512h {
static_assert_uimm_bits!(IMM8, 8);
_mm512_mask_reduce_ph::<IMM8>(_mm512_setzero_ph(), k, a)
@@ -11083,7 +11083,7 @@ pub fn _mm512_maskz_reduce_ph<const IMM8: i32>(k: __mmask32, a: __m512h) -> __m5
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0, SAE = 8))]
#[rustc_legacy_const_generics(1, 2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_reduce_round_ph<const IMM8: i32, const SAE: i32>(a: __m512h) -> __m512h {
static_assert_uimm_bits!(IMM8, 8);
static_assert_sae!(SAE);
@@ -11109,7 +11109,7 @@ pub fn _mm512_reduce_round_ph<const IMM8: i32, const SAE: i32>(a: __m512h) -> __
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0, SAE = 8))]
#[rustc_legacy_const_generics(3, 4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_reduce_round_ph<const IMM8: i32, const SAE: i32>(
src: __m512h,
k: __mmask32,
@@ -11141,7 +11141,7 @@ pub fn _mm512_mask_reduce_round_ph<const IMM8: i32, const SAE: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0, SAE = 8))]
#[rustc_legacy_const_generics(2, 3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_reduce_round_ph<const IMM8: i32, const SAE: i32>(
k: __mmask32,
a: __m512h,
@@ -11168,7 +11168,7 @@ pub fn _mm512_maskz_reduce_round_ph<const IMM8: i32, const SAE: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vreducesh, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_reduce_sh<const IMM8: i32>(a: __m128h, b: __m128h) -> __m128h {
static_assert_uimm_bits!(IMM8, 8);
_mm_mask_reduce_sh::<IMM8>(f16x8::ZERO.as_m128h(), 0xff, a, b)
@@ -11192,7 +11192,7 @@ pub fn _mm_reduce_sh<const IMM8: i32>(a: __m128h, b: __m128h) -> __m128h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vreducesh, IMM8 = 0))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_reduce_sh<const IMM8: i32>(
src: __m128h,
k: __mmask8,
@@ -11221,7 +11221,7 @@ pub fn _mm_mask_reduce_sh<const IMM8: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vreducesh, IMM8 = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_reduce_sh<const IMM8: i32>(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
static_assert_uimm_bits!(IMM8, 8);
_mm_mask_reduce_sh::<IMM8>(f16x8::ZERO.as_m128h(), k, a, b)
@@ -11246,7 +11246,7 @@ pub fn _mm_maskz_reduce_sh<const IMM8: i32>(k: __mmask8, a: __m128h, b: __m128h)
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vreducesh, IMM8 = 0, SAE = 8))]
#[rustc_legacy_const_generics(2, 3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_reduce_round_sh<const IMM8: i32, const SAE: i32>(a: __m128h, b: __m128h) -> __m128h {
static_assert_uimm_bits!(IMM8, 8);
static_assert_sae!(SAE);
@@ -11273,7 +11273,7 @@ pub fn _mm_reduce_round_sh<const IMM8: i32, const SAE: i32>(a: __m128h, b: __m12
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vreducesh, IMM8 = 0, SAE = 8))]
#[rustc_legacy_const_generics(4, 5)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_reduce_round_sh<const IMM8: i32, const SAE: i32>(
src: __m128h,
k: __mmask8,
@@ -11307,7 +11307,7 @@ pub fn _mm_mask_reduce_round_sh<const IMM8: i32, const SAE: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vreducesh, IMM8 = 0, SAE = 8))]
#[rustc_legacy_const_generics(3, 4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_reduce_round_sh<const IMM8: i32, const SAE: i32>(
k: __mmask8,
a: __m128h,
@@ -11582,7 +11582,7 @@ macro_rules! fpclass_asm { // FIXME: use LLVM intrinsics
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfpclassph, IMM8 = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_fpclass_ph_mask<const IMM8: i32>(a: __m128h) -> __mmask8 {
unsafe {
static_assert_uimm_bits!(IMM8, 8);
@@ -11609,7 +11609,7 @@ pub fn _mm_fpclass_ph_mask<const IMM8: i32>(a: __m128h) -> __mmask8 {
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfpclassph, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_fpclass_ph_mask<const IMM8: i32>(k1: __mmask8, a: __m128h) -> __mmask8 {
unsafe {
static_assert_uimm_bits!(IMM8, 8);
@@ -11635,7 +11635,7 @@ pub fn _mm_mask_fpclass_ph_mask<const IMM8: i32>(k1: __mmask8, a: __m128h) -> __
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfpclassph, IMM8 = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_fpclass_ph_mask<const IMM8: i32>(a: __m256h) -> __mmask16 {
unsafe {
static_assert_uimm_bits!(IMM8, 8);
@@ -11662,7 +11662,7 @@ pub fn _mm256_fpclass_ph_mask<const IMM8: i32>(a: __m256h) -> __mmask16 {
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vfpclassph, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_fpclass_ph_mask<const IMM8: i32>(k1: __mmask16, a: __m256h) -> __mmask16 {
unsafe {
static_assert_uimm_bits!(IMM8, 8);
@@ -11688,7 +11688,7 @@ pub fn _mm256_mask_fpclass_ph_mask<const IMM8: i32>(k1: __mmask16, a: __m256h) -
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfpclassph, IMM8 = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_fpclass_ph_mask<const IMM8: i32>(a: __m512h) -> __mmask32 {
unsafe {
static_assert_uimm_bits!(IMM8, 8);
@@ -11715,7 +11715,7 @@ pub fn _mm512_fpclass_ph_mask<const IMM8: i32>(a: __m512h) -> __mmask32 {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfpclassph, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_fpclass_ph_mask<const IMM8: i32>(k1: __mmask32, a: __m512h) -> __mmask32 {
unsafe {
static_assert_uimm_bits!(IMM8, 8);
@@ -11741,7 +11741,7 @@ pub fn _mm512_mask_fpclass_ph_mask<const IMM8: i32>(k1: __mmask32, a: __m512h) -
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfpclasssh, IMM8 = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_fpclass_sh_mask<const IMM8: i32>(a: __m128h) -> __mmask8 {
_mm_mask_fpclass_sh_mask::<IMM8>(0xff, a)
}
@@ -11765,7 +11765,7 @@ pub fn _mm_fpclass_sh_mask<const IMM8: i32>(a: __m128h) -> __mmask8 {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vfpclasssh, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_fpclass_sh_mask<const IMM8: i32>(k1: __mmask8, a: __m128h) -> __mmask8 {
unsafe {
static_assert_uimm_bits!(IMM8, 8);
@@ -11779,7 +11779,7 @@ pub fn _mm_mask_fpclass_sh_mask<const IMM8: i32>(k1: __mmask8, a: __m128h) -> __
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_blend_ph)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_mask_blend_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
unsafe { simd_select_bitmask(k, b, a) }
@@ -11791,7 +11791,7 @@ pub const fn _mm_mask_blend_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_blend_ph)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm256_mask_blend_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h {
unsafe { simd_select_bitmask(k, b, a) }
@@ -11803,7 +11803,7 @@ pub const fn _mm256_mask_blend_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m25
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_blend_ph)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm512_mask_blend_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
unsafe { simd_select_bitmask(k, b, a) }
@@ -11815,7 +11815,7 @@ pub const fn _mm512_mask_blend_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m51
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_permutex2var_ph)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_permutex2var_ph(a: __m128h, idx: __m128i, b: __m128h) -> __m128h {
_mm_castsi128_ph(_mm_permutex2var_epi16(
_mm_castph_si128(a),
@@ -11830,7 +11830,7 @@ pub fn _mm_permutex2var_ph(a: __m128h, idx: __m128i, b: __m128h) -> __m128h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutex2var_ph)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_permutex2var_ph(a: __m256h, idx: __m256i, b: __m256h) -> __m256h {
_mm256_castsi256_ph(_mm256_permutex2var_epi16(
_mm256_castph_si256(a),
@@ -11845,7 +11845,7 @@ pub fn _mm256_permutex2var_ph(a: __m256h, idx: __m256i, b: __m256h) -> __m256h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permutex2var_ph)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_permutex2var_ph(a: __m512h, idx: __m512i, b: __m512h) -> __m512h {
_mm512_castsi512_ph(_mm512_permutex2var_epi16(
_mm512_castph_si512(a),
@@ -11860,7 +11860,7 @@ pub fn _mm512_permutex2var_ph(a: __m512h, idx: __m512i, b: __m512h) -> __m512h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_permutexvar_ph)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_permutexvar_ph(idx: __m128i, a: __m128h) -> __m128h {
_mm_castsi128_ph(_mm_permutexvar_epi16(idx, _mm_castph_si128(a)))
}
@@ -11871,7 +11871,7 @@ pub fn _mm_permutexvar_ph(idx: __m128i, a: __m128h) -> __m128h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutexvar_ph)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_permutexvar_ph(idx: __m256i, a: __m256h) -> __m256h {
_mm256_castsi256_ph(_mm256_permutexvar_epi16(idx, _mm256_castph_si256(a)))
}
@@ -11882,7 +11882,7 @@ pub fn _mm256_permutexvar_ph(idx: __m256i, a: __m256h) -> __m256h {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permutexvar_ph)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_permutexvar_ph(idx: __m512i, a: __m512h) -> __m512h {
_mm512_castsi512_ph(_mm512_permutexvar_epi16(idx, _mm512_castph_si512(a)))
}
@@ -11894,7 +11894,7 @@ pub fn _mm512_permutexvar_ph(idx: __m512i, a: __m512h) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtw2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvtepi16_ph(a: __m128i) -> __m128h {
unsafe { vcvtw2ph_128(a.as_i16x8(), _MM_FROUND_CUR_DIRECTION) }
}
@@ -11907,7 +11907,7 @@ pub fn _mm_cvtepi16_ph(a: __m128i) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtw2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cvtepi16_ph(src: __m128h, k: __mmask8, a: __m128i) -> __m128h {
unsafe { simd_select_bitmask(k, _mm_cvtepi16_ph(a), src) }
}
@@ -11919,7 +11919,7 @@ pub fn _mm_mask_cvtepi16_ph(src: __m128h, k: __mmask8, a: __m128i) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtw2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cvtepi16_ph(k: __mmask8, a: __m128i) -> __m128h {
_mm_mask_cvtepi16_ph(_mm_setzero_ph(), k, a)
}
@@ -11931,7 +11931,7 @@ pub fn _mm_maskz_cvtepi16_ph(k: __mmask8, a: __m128i) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtw2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_cvtepi16_ph(a: __m256i) -> __m256h {
unsafe { vcvtw2ph_256(a.as_i16x16(), _MM_FROUND_CUR_DIRECTION) }
}
@@ -11944,7 +11944,7 @@ pub fn _mm256_cvtepi16_ph(a: __m256i) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtw2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_cvtepi16_ph(src: __m256h, k: __mmask16, a: __m256i) -> __m256h {
unsafe { simd_select_bitmask(k, _mm256_cvtepi16_ph(a), src) }
}
@@ -11956,7 +11956,7 @@ pub fn _mm256_mask_cvtepi16_ph(src: __m256h, k: __mmask16, a: __m256i) -> __m256
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtw2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_cvtepi16_ph(k: __mmask16, a: __m256i) -> __m256h {
_mm256_mask_cvtepi16_ph(_mm256_setzero_ph(), k, a)
}
@@ -11968,7 +11968,7 @@ pub fn _mm256_maskz_cvtepi16_ph(k: __mmask16, a: __m256i) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtw2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvtepi16_ph(a: __m512i) -> __m512h {
unsafe { vcvtw2ph_512(a.as_i16x32(), _MM_FROUND_CUR_DIRECTION) }
}
@@ -11981,7 +11981,7 @@ pub fn _mm512_cvtepi16_ph(a: __m512i) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtw2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvtepi16_ph(src: __m512h, k: __mmask32, a: __m512i) -> __m512h {
unsafe { simd_select_bitmask(k, _mm512_cvtepi16_ph(a), src) }
}
@@ -11993,7 +11993,7 @@ pub fn _mm512_mask_cvtepi16_ph(src: __m512h, k: __mmask32, a: __m512i) -> __m512
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtw2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvtepi16_ph(k: __mmask32, a: __m512i) -> __m512h {
_mm512_mask_cvtepi16_ph(_mm512_setzero_ph(), k, a)
}
@@ -12014,7 +12014,7 @@ pub fn _mm512_maskz_cvtepi16_ph(k: __mmask32, a: __m512i) -> __m512h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtw2ph, ROUNDING = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvt_roundepi16_ph<const ROUNDING: i32>(a: __m512i) -> __m512h {
unsafe {
static_assert_rounding!(ROUNDING);
@@ -12039,7 +12039,7 @@ pub fn _mm512_cvt_roundepi16_ph<const ROUNDING: i32>(a: __m512i) -> __m512h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtw2ph, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvt_roundepi16_ph<const ROUNDING: i32>(
src: __m512h,
k: __mmask32,
@@ -12067,7 +12067,7 @@ pub fn _mm512_mask_cvt_roundepi16_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtw2ph, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvt_roundepi16_ph<const ROUNDING: i32>(k: __mmask32, a: __m512i) -> __m512h {
static_assert_rounding!(ROUNDING);
_mm512_mask_cvt_roundepi16_ph::<ROUNDING>(_mm512_setzero_ph(), k, a)
@@ -12080,7 +12080,7 @@ pub fn _mm512_maskz_cvt_roundepi16_ph<const ROUNDING: i32>(k: __mmask32, a: __m5
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtuw2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvtepu16_ph(a: __m128i) -> __m128h {
unsafe { vcvtuw2ph_128(a.as_u16x8(), _MM_FROUND_CUR_DIRECTION) }
}
@@ -12093,7 +12093,7 @@ pub fn _mm_cvtepu16_ph(a: __m128i) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtuw2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cvtepu16_ph(src: __m128h, k: __mmask8, a: __m128i) -> __m128h {
unsafe { simd_select_bitmask(k, _mm_cvtepu16_ph(a), src) }
}
@@ -12105,7 +12105,7 @@ pub fn _mm_mask_cvtepu16_ph(src: __m128h, k: __mmask8, a: __m128i) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtuw2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cvtepu16_ph(k: __mmask8, a: __m128i) -> __m128h {
_mm_mask_cvtepu16_ph(_mm_setzero_ph(), k, a)
}
@@ -12117,7 +12117,7 @@ pub fn _mm_maskz_cvtepu16_ph(k: __mmask8, a: __m128i) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtuw2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_cvtepu16_ph(a: __m256i) -> __m256h {
unsafe { vcvtuw2ph_256(a.as_u16x16(), _MM_FROUND_CUR_DIRECTION) }
}
@@ -12130,7 +12130,7 @@ pub fn _mm256_cvtepu16_ph(a: __m256i) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtuw2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_cvtepu16_ph(src: __m256h, k: __mmask16, a: __m256i) -> __m256h {
unsafe { simd_select_bitmask(k, _mm256_cvtepu16_ph(a), src) }
}
@@ -12142,7 +12142,7 @@ pub fn _mm256_mask_cvtepu16_ph(src: __m256h, k: __mmask16, a: __m256i) -> __m256
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtuw2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_cvtepu16_ph(k: __mmask16, a: __m256i) -> __m256h {
_mm256_mask_cvtepu16_ph(_mm256_setzero_ph(), k, a)
}
@@ -12154,7 +12154,7 @@ pub fn _mm256_maskz_cvtepu16_ph(k: __mmask16, a: __m256i) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtuw2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvtepu16_ph(a: __m512i) -> __m512h {
unsafe { vcvtuw2ph_512(a.as_u16x32(), _MM_FROUND_CUR_DIRECTION) }
}
@@ -12167,7 +12167,7 @@ pub fn _mm512_cvtepu16_ph(a: __m512i) -> __m512h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtuw2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvtepu16_ph(src: __m512h, k: __mmask32, a: __m512i) -> __m512h {
unsafe { simd_select_bitmask(k, _mm512_cvtepu16_ph(a), src) }
}
@@ -12179,7 +12179,7 @@ pub fn _mm512_mask_cvtepu16_ph(src: __m512h, k: __mmask32, a: __m512i) -> __m512
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtuw2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvtepu16_ph(k: __mmask32, a: __m512i) -> __m512h {
_mm512_mask_cvtepu16_ph(_mm512_setzero_ph(), k, a)
}
@@ -12200,7 +12200,7 @@ pub fn _mm512_maskz_cvtepu16_ph(k: __mmask32, a: __m512i) -> __m512h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtuw2ph, ROUNDING = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvt_roundepu16_ph<const ROUNDING: i32>(a: __m512i) -> __m512h {
unsafe {
static_assert_rounding!(ROUNDING);
@@ -12225,7 +12225,7 @@ pub fn _mm512_cvt_roundepu16_ph<const ROUNDING: i32>(a: __m512i) -> __m512h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtuw2ph, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvt_roundepu16_ph<const ROUNDING: i32>(
src: __m512h,
k: __mmask32,
@@ -12253,7 +12253,7 @@ pub fn _mm512_mask_cvt_roundepu16_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtuw2ph, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvt_roundepu16_ph<const ROUNDING: i32>(k: __mmask32, a: __m512i) -> __m512h {
static_assert_rounding!(ROUNDING);
_mm512_mask_cvt_roundepu16_ph::<ROUNDING>(_mm512_setzero_ph(), k, a)
@@ -12266,7 +12266,7 @@ pub fn _mm512_maskz_cvt_roundepu16_ph<const ROUNDING: i32>(k: __mmask32, a: __m5
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtdq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvtepi32_ph(a: __m128i) -> __m128h {
_mm_mask_cvtepi32_ph(_mm_setzero_ph(), 0xff, a)
}
@@ -12279,7 +12279,7 @@ pub fn _mm_cvtepi32_ph(a: __m128i) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtdq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cvtepi32_ph(src: __m128h, k: __mmask8, a: __m128i) -> __m128h {
unsafe { vcvtdq2ph_128(a.as_i32x4(), src, k) }
}
@@ -12292,7 +12292,7 @@ pub fn _mm_mask_cvtepi32_ph(src: __m128h, k: __mmask8, a: __m128i) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtdq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cvtepi32_ph(k: __mmask8, a: __m128i) -> __m128h {
_mm_mask_cvtepi32_ph(_mm_setzero_ph(), k, a)
}
@@ -12304,7 +12304,7 @@ pub fn _mm_maskz_cvtepi32_ph(k: __mmask8, a: __m128i) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtdq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_cvtepi32_ph(a: __m256i) -> __m128h {
unsafe { vcvtdq2ph_256(a.as_i32x8(), _MM_FROUND_CUR_DIRECTION) }
}
@@ -12317,7 +12317,7 @@ pub fn _mm256_cvtepi32_ph(a: __m256i) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtdq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_cvtepi32_ph(src: __m128h, k: __mmask8, a: __m256i) -> __m128h {
unsafe { simd_select_bitmask(k, _mm256_cvtepi32_ph(a), src) }
}
@@ -12329,7 +12329,7 @@ pub fn _mm256_mask_cvtepi32_ph(src: __m128h, k: __mmask8, a: __m256i) -> __m128h
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtdq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_cvtepi32_ph(k: __mmask8, a: __m256i) -> __m128h {
_mm256_mask_cvtepi32_ph(_mm_setzero_ph(), k, a)
}
@@ -12341,7 +12341,7 @@ pub fn _mm256_maskz_cvtepi32_ph(k: __mmask8, a: __m256i) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtdq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvtepi32_ph(a: __m512i) -> __m256h {
unsafe { vcvtdq2ph_512(a.as_i32x16(), _MM_FROUND_CUR_DIRECTION) }
}
@@ -12354,7 +12354,7 @@ pub fn _mm512_cvtepi32_ph(a: __m512i) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtdq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvtepi32_ph(src: __m256h, k: __mmask16, a: __m512i) -> __m256h {
unsafe { simd_select_bitmask(k, _mm512_cvtepi32_ph(a), src) }
}
@@ -12366,7 +12366,7 @@ pub fn _mm512_mask_cvtepi32_ph(src: __m256h, k: __mmask16, a: __m512i) -> __m256
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtdq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvtepi32_ph(k: __mmask16, a: __m512i) -> __m256h {
_mm512_mask_cvtepi32_ph(f16x16::ZERO.as_m256h(), k, a)
}
@@ -12387,7 +12387,7 @@ pub fn _mm512_maskz_cvtepi32_ph(k: __mmask16, a: __m512i) -> __m256h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtdq2ph, ROUNDING = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvt_roundepi32_ph<const ROUNDING: i32>(a: __m512i) -> __m256h {
unsafe {
static_assert_rounding!(ROUNDING);
@@ -12412,7 +12412,7 @@ pub fn _mm512_cvt_roundepi32_ph<const ROUNDING: i32>(a: __m512i) -> __m256h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtdq2ph, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvt_roundepi32_ph<const ROUNDING: i32>(
src: __m256h,
k: __mmask16,
@@ -12440,7 +12440,7 @@ pub fn _mm512_mask_cvt_roundepi32_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtdq2ph, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvt_roundepi32_ph<const ROUNDING: i32>(k: __mmask16, a: __m512i) -> __m256h {
static_assert_rounding!(ROUNDING);
_mm512_mask_cvt_roundepi32_ph::<ROUNDING>(f16x16::ZERO.as_m256h(), k, a)
@@ -12454,7 +12454,7 @@ pub fn _mm512_maskz_cvt_roundepi32_ph<const ROUNDING: i32>(k: __mmask16, a: __m5
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtsi2sh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvti32_sh(a: __m128h, b: i32) -> __m128h {
unsafe { vcvtsi2sh(a, b, _MM_FROUND_CUR_DIRECTION) }
}
@@ -12476,7 +12476,7 @@ pub fn _mm_cvti32_sh(a: __m128h, b: i32) -> __m128h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtsi2sh, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvt_roundi32_sh<const ROUNDING: i32>(a: __m128h, b: i32) -> __m128h {
unsafe {
static_assert_rounding!(ROUNDING);
@@ -12491,7 +12491,7 @@ pub fn _mm_cvt_roundi32_sh<const ROUNDING: i32>(a: __m128h, b: i32) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtudq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvtepu32_ph(a: __m128i) -> __m128h {
_mm_mask_cvtepu32_ph(_mm_setzero_ph(), 0xff, a)
}
@@ -12504,7 +12504,7 @@ pub fn _mm_cvtepu32_ph(a: __m128i) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtudq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cvtepu32_ph(src: __m128h, k: __mmask8, a: __m128i) -> __m128h {
unsafe { vcvtudq2ph_128(a.as_u32x4(), src, k) }
}
@@ -12517,7 +12517,7 @@ pub fn _mm_mask_cvtepu32_ph(src: __m128h, k: __mmask8, a: __m128i) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtudq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cvtepu32_ph(k: __mmask8, a: __m128i) -> __m128h {
_mm_mask_cvtepu32_ph(_mm_setzero_ph(), k, a)
}
@@ -12529,7 +12529,7 @@ pub fn _mm_maskz_cvtepu32_ph(k: __mmask8, a: __m128i) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtudq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_cvtepu32_ph(a: __m256i) -> __m128h {
unsafe { vcvtudq2ph_256(a.as_u32x8(), _MM_FROUND_CUR_DIRECTION) }
}
@@ -12542,7 +12542,7 @@ pub fn _mm256_cvtepu32_ph(a: __m256i) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtudq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_cvtepu32_ph(src: __m128h, k: __mmask8, a: __m256i) -> __m128h {
unsafe { simd_select_bitmask(k, _mm256_cvtepu32_ph(a), src) }
}
@@ -12554,7 +12554,7 @@ pub fn _mm256_mask_cvtepu32_ph(src: __m128h, k: __mmask8, a: __m256i) -> __m128h
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtudq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_cvtepu32_ph(k: __mmask8, a: __m256i) -> __m128h {
_mm256_mask_cvtepu32_ph(_mm_setzero_ph(), k, a)
}
@@ -12566,7 +12566,7 @@ pub fn _mm256_maskz_cvtepu32_ph(k: __mmask8, a: __m256i) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtudq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvtepu32_ph(a: __m512i) -> __m256h {
unsafe { vcvtudq2ph_512(a.as_u32x16(), _MM_FROUND_CUR_DIRECTION) }
}
@@ -12579,7 +12579,7 @@ pub fn _mm512_cvtepu32_ph(a: __m512i) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtudq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvtepu32_ph(src: __m256h, k: __mmask16, a: __m512i) -> __m256h {
unsafe { simd_select_bitmask(k, _mm512_cvtepu32_ph(a), src) }
}
@@ -12591,7 +12591,7 @@ pub fn _mm512_mask_cvtepu32_ph(src: __m256h, k: __mmask16, a: __m512i) -> __m256
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtudq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvtepu32_ph(k: __mmask16, a: __m512i) -> __m256h {
_mm512_mask_cvtepu32_ph(f16x16::ZERO.as_m256h(), k, a)
}
@@ -12612,7 +12612,7 @@ pub fn _mm512_maskz_cvtepu32_ph(k: __mmask16, a: __m512i) -> __m256h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtudq2ph, ROUNDING = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvt_roundepu32_ph<const ROUNDING: i32>(a: __m512i) -> __m256h {
unsafe {
static_assert_rounding!(ROUNDING);
@@ -12637,7 +12637,7 @@ pub fn _mm512_cvt_roundepu32_ph<const ROUNDING: i32>(a: __m512i) -> __m256h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtudq2ph, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvt_roundepu32_ph<const ROUNDING: i32>(
src: __m256h,
k: __mmask16,
@@ -12665,7 +12665,7 @@ pub fn _mm512_mask_cvt_roundepu32_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtudq2ph, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvt_roundepu32_ph<const ROUNDING: i32>(k: __mmask16, a: __m512i) -> __m256h {
static_assert_rounding!(ROUNDING);
_mm512_mask_cvt_roundepu32_ph::<ROUNDING>(f16x16::ZERO.as_m256h(), k, a)
@@ -12679,7 +12679,7 @@ pub fn _mm512_maskz_cvt_roundepu32_ph<const ROUNDING: i32>(k: __mmask16, a: __m5
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtusi2sh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvtu32_sh(a: __m128h, b: u32) -> __m128h {
unsafe { vcvtusi2sh(a, b, _MM_FROUND_CUR_DIRECTION) }
}
@@ -12701,7 +12701,7 @@ pub fn _mm_cvtu32_sh(a: __m128h, b: u32) -> __m128h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtusi2sh, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvt_roundu32_sh<const ROUNDING: i32>(a: __m128h, b: u32) -> __m128h {
unsafe {
static_assert_rounding!(ROUNDING);
@@ -12716,7 +12716,7 @@ pub fn _mm_cvt_roundu32_sh<const ROUNDING: i32>(a: __m128h, b: u32) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtqq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvtepi64_ph(a: __m128i) -> __m128h {
_mm_mask_cvtepi64_ph(_mm_setzero_ph(), 0xff, a)
}
@@ -12729,7 +12729,7 @@ pub fn _mm_cvtepi64_ph(a: __m128i) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtqq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cvtepi64_ph(src: __m128h, k: __mmask8, a: __m128i) -> __m128h {
unsafe { vcvtqq2ph_128(a.as_i64x2(), src, k) }
}
@@ -12742,7 +12742,7 @@ pub fn _mm_mask_cvtepi64_ph(src: __m128h, k: __mmask8, a: __m128i) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtqq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cvtepi64_ph(k: __mmask8, a: __m128i) -> __m128h {
_mm_mask_cvtepi64_ph(_mm_setzero_ph(), k, a)
}
@@ -12754,7 +12754,7 @@ pub fn _mm_maskz_cvtepi64_ph(k: __mmask8, a: __m128i) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtqq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_cvtepi64_ph(a: __m256i) -> __m128h {
_mm256_mask_cvtepi64_ph(_mm_setzero_ph(), 0xff, a)
}
@@ -12767,7 +12767,7 @@ pub fn _mm256_cvtepi64_ph(a: __m256i) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtqq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_cvtepi64_ph(src: __m128h, k: __mmask8, a: __m256i) -> __m128h {
unsafe { vcvtqq2ph_256(a.as_i64x4(), src, k) }
}
@@ -12780,7 +12780,7 @@ pub fn _mm256_mask_cvtepi64_ph(src: __m128h, k: __mmask8, a: __m256i) -> __m128h
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtqq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_cvtepi64_ph(k: __mmask8, a: __m256i) -> __m128h {
_mm256_mask_cvtepi64_ph(_mm_setzero_ph(), k, a)
}
@@ -12792,7 +12792,7 @@ pub fn _mm256_maskz_cvtepi64_ph(k: __mmask8, a: __m256i) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtqq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvtepi64_ph(a: __m512i) -> __m128h {
unsafe { vcvtqq2ph_512(a.as_i64x8(), _MM_FROUND_CUR_DIRECTION) }
}
@@ -12805,7 +12805,7 @@ pub fn _mm512_cvtepi64_ph(a: __m512i) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtqq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvtepi64_ph(src: __m128h, k: __mmask8, a: __m512i) -> __m128h {
unsafe { simd_select_bitmask(k, _mm512_cvtepi64_ph(a), src) }
}
@@ -12817,7 +12817,7 @@ pub fn _mm512_mask_cvtepi64_ph(src: __m128h, k: __mmask8, a: __m512i) -> __m128h
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtqq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvtepi64_ph(k: __mmask8, a: __m512i) -> __m128h {
_mm512_mask_cvtepi64_ph(f16x8::ZERO.as_m128h(), k, a)
}
@@ -12838,7 +12838,7 @@ pub fn _mm512_maskz_cvtepi64_ph(k: __mmask8, a: __m512i) -> __m128h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtqq2ph, ROUNDING = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvt_roundepi64_ph<const ROUNDING: i32>(a: __m512i) -> __m128h {
unsafe {
static_assert_rounding!(ROUNDING);
@@ -12863,7 +12863,7 @@ pub fn _mm512_cvt_roundepi64_ph<const ROUNDING: i32>(a: __m512i) -> __m128h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtqq2ph, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvt_roundepi64_ph<const ROUNDING: i32>(
src: __m128h,
k: __mmask8,
@@ -12891,7 +12891,7 @@ pub fn _mm512_mask_cvt_roundepi64_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtqq2ph, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvt_roundepi64_ph<const ROUNDING: i32>(k: __mmask8, a: __m512i) -> __m128h {
static_assert_rounding!(ROUNDING);
_mm512_mask_cvt_roundepi64_ph::<ROUNDING>(f16x8::ZERO.as_m128h(), k, a)
@@ -12904,7 +12904,7 @@ pub fn _mm512_maskz_cvt_roundepi64_ph<const ROUNDING: i32>(k: __mmask8, a: __m51
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtuqq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvtepu64_ph(a: __m128i) -> __m128h {
_mm_mask_cvtepu64_ph(_mm_setzero_ph(), 0xff, a)
}
@@ -12917,7 +12917,7 @@ pub fn _mm_cvtepu64_ph(a: __m128i) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtuqq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cvtepu64_ph(src: __m128h, k: __mmask8, a: __m128i) -> __m128h {
unsafe { vcvtuqq2ph_128(a.as_u64x2(), src, k) }
}
@@ -12930,7 +12930,7 @@ pub fn _mm_mask_cvtepu64_ph(src: __m128h, k: __mmask8, a: __m128i) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtuqq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cvtepu64_ph(k: __mmask8, a: __m128i) -> __m128h {
_mm_mask_cvtepu64_ph(_mm_setzero_ph(), k, a)
}
@@ -12942,7 +12942,7 @@ pub fn _mm_maskz_cvtepu64_ph(k: __mmask8, a: __m128i) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtuqq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_cvtepu64_ph(a: __m256i) -> __m128h {
_mm256_mask_cvtepu64_ph(_mm_setzero_ph(), 0xff, a)
}
@@ -12955,7 +12955,7 @@ pub fn _mm256_cvtepu64_ph(a: __m256i) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtuqq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_cvtepu64_ph(src: __m128h, k: __mmask8, a: __m256i) -> __m128h {
unsafe { vcvtuqq2ph_256(a.as_u64x4(), src, k) }
}
@@ -12968,7 +12968,7 @@ pub fn _mm256_mask_cvtepu64_ph(src: __m128h, k: __mmask8, a: __m256i) -> __m128h
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtuqq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_cvtepu64_ph(k: __mmask8, a: __m256i) -> __m128h {
_mm256_mask_cvtepu64_ph(_mm_setzero_ph(), k, a)
}
@@ -12980,7 +12980,7 @@ pub fn _mm256_maskz_cvtepu64_ph(k: __mmask8, a: __m256i) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtuqq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvtepu64_ph(a: __m512i) -> __m128h {
unsafe { vcvtuqq2ph_512(a.as_u64x8(), _MM_FROUND_CUR_DIRECTION) }
}
@@ -12993,7 +12993,7 @@ pub fn _mm512_cvtepu64_ph(a: __m512i) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtuqq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvtepu64_ph(src: __m128h, k: __mmask8, a: __m512i) -> __m128h {
unsafe { simd_select_bitmask(k, _mm512_cvtepu64_ph(a), src) }
}
@@ -13005,7 +13005,7 @@ pub fn _mm512_mask_cvtepu64_ph(src: __m128h, k: __mmask8, a: __m512i) -> __m128h
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtuqq2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvtepu64_ph(k: __mmask8, a: __m512i) -> __m128h {
_mm512_mask_cvtepu64_ph(f16x8::ZERO.as_m128h(), k, a)
}
@@ -13026,7 +13026,7 @@ pub fn _mm512_maskz_cvtepu64_ph(k: __mmask8, a: __m512i) -> __m128h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtuqq2ph, ROUNDING = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvt_roundepu64_ph<const ROUNDING: i32>(a: __m512i) -> __m128h {
unsafe {
static_assert_rounding!(ROUNDING);
@@ -13051,7 +13051,7 @@ pub fn _mm512_cvt_roundepu64_ph<const ROUNDING: i32>(a: __m512i) -> __m128h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtuqq2ph, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvt_roundepu64_ph<const ROUNDING: i32>(
src: __m128h,
k: __mmask8,
@@ -13079,7 +13079,7 @@ pub fn _mm512_mask_cvt_roundepu64_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtuqq2ph, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvt_roundepu64_ph<const ROUNDING: i32>(k: __mmask8, a: __m512i) -> __m128h {
static_assert_rounding!(ROUNDING);
_mm512_mask_cvt_roundepu64_ph::<ROUNDING>(f16x8::ZERO.as_m128h(), k, a)
@@ -13092,7 +13092,7 @@ pub fn _mm512_maskz_cvt_roundepu64_ph<const ROUNDING: i32>(k: __mmask8, a: __m51
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtps2phx))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvtxps_ph(a: __m128) -> __m128h {
_mm_mask_cvtxps_ph(_mm_setzero_ph(), 0xff, a)
}
@@ -13105,7 +13105,7 @@ pub fn _mm_cvtxps_ph(a: __m128) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtps2phx))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cvtxps_ph(src: __m128h, k: __mmask8, a: __m128) -> __m128h {
unsafe { vcvtps2phx_128(a, src, k) }
}
@@ -13118,7 +13118,7 @@ pub fn _mm_mask_cvtxps_ph(src: __m128h, k: __mmask8, a: __m128) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtps2phx))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cvtxps_ph(k: __mmask8, a: __m128) -> __m128h {
_mm_mask_cvtxps_ph(_mm_setzero_ph(), k, a)
}
@@ -13130,7 +13130,7 @@ pub fn _mm_maskz_cvtxps_ph(k: __mmask8, a: __m128) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtps2phx))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_cvtxps_ph(a: __m256) -> __m128h {
_mm256_mask_cvtxps_ph(_mm_setzero_ph(), 0xff, a)
}
@@ -13143,7 +13143,7 @@ pub fn _mm256_cvtxps_ph(a: __m256) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtps2phx))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_cvtxps_ph(src: __m128h, k: __mmask8, a: __m256) -> __m128h {
unsafe { vcvtps2phx_256(a, src, k) }
}
@@ -13156,7 +13156,7 @@ pub fn _mm256_mask_cvtxps_ph(src: __m128h, k: __mmask8, a: __m256) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtps2phx))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_cvtxps_ph(k: __mmask8, a: __m256) -> __m128h {
_mm256_mask_cvtxps_ph(_mm_setzero_ph(), k, a)
}
@@ -13168,7 +13168,7 @@ pub fn _mm256_maskz_cvtxps_ph(k: __mmask8, a: __m256) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtps2phx))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvtxps_ph(a: __m512) -> __m256h {
_mm512_mask_cvtxps_ph(f16x16::ZERO.as_m256h(), 0xffff, a)
}
@@ -13181,7 +13181,7 @@ pub fn _mm512_cvtxps_ph(a: __m512) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtps2phx))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvtxps_ph(src: __m256h, k: __mmask16, a: __m512) -> __m256h {
unsafe { vcvtps2phx_512(a, src, k, _MM_FROUND_CUR_DIRECTION) }
}
@@ -13194,7 +13194,7 @@ pub fn _mm512_mask_cvtxps_ph(src: __m256h, k: __mmask16, a: __m512) -> __m256h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtps2phx))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvtxps_ph(k: __mmask16, a: __m512) -> __m256h {
_mm512_mask_cvtxps_ph(f16x16::ZERO.as_m256h(), k, a)
}
@@ -13215,7 +13215,7 @@ pub fn _mm512_maskz_cvtxps_ph(k: __mmask16, a: __m512) -> __m256h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtps2phx, ROUNDING = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvtx_roundps_ph<const ROUNDING: i32>(a: __m512) -> __m256h {
static_assert_rounding!(ROUNDING);
_mm512_mask_cvtx_roundps_ph::<ROUNDING>(f16x16::ZERO.as_m256h(), 0xffff, a)
@@ -13238,7 +13238,7 @@ pub fn _mm512_cvtx_roundps_ph<const ROUNDING: i32>(a: __m512) -> __m256h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtps2phx, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvtx_roundps_ph<const ROUNDING: i32>(
src: __m256h,
k: __mmask16,
@@ -13267,7 +13267,7 @@ pub fn _mm512_mask_cvtx_roundps_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtps2phx, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvtx_roundps_ph<const ROUNDING: i32>(k: __mmask16, a: __m512) -> __m256h {
static_assert_rounding!(ROUNDING);
_mm512_mask_cvtx_roundps_ph::<ROUNDING>(f16x16::ZERO.as_m256h(), k, a)
@@ -13281,7 +13281,7 @@ pub fn _mm512_maskz_cvtx_roundps_ph<const ROUNDING: i32>(k: __mmask16, a: __m512
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtss2sh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvtss_sh(a: __m128h, b: __m128) -> __m128h {
_mm_mask_cvtss_sh(f16x8::ZERO.as_m128h(), 0xff, a, b)
}
@@ -13295,7 +13295,7 @@ pub fn _mm_cvtss_sh(a: __m128h, b: __m128) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtss2sh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cvtss_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128) -> __m128h {
unsafe { vcvtss2sh(a, b, src, k, _MM_FROUND_CUR_DIRECTION) }
}
@@ -13309,7 +13309,7 @@ pub fn _mm_mask_cvtss_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128) -> __
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtss2sh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cvtss_sh(k: __mmask8, a: __m128h, b: __m128) -> __m128h {
_mm_mask_cvtss_sh(f16x8::ZERO.as_m128h(), k, a, b)
}
@@ -13331,7 +13331,7 @@ pub fn _mm_maskz_cvtss_sh(k: __mmask8, a: __m128h, b: __m128) -> __m128h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtss2sh, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvt_roundss_sh<const ROUNDING: i32>(a: __m128h, b: __m128) -> __m128h {
static_assert_rounding!(ROUNDING);
_mm_mask_cvt_roundss_sh::<ROUNDING>(f16x8::ZERO.as_m128h(), 0xff, a, b)
@@ -13355,7 +13355,7 @@ pub fn _mm_cvt_roundss_sh<const ROUNDING: i32>(a: __m128h, b: __m128) -> __m128h
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtss2sh, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cvt_roundss_sh<const ROUNDING: i32>(
src: __m128h,
k: __mmask8,
@@ -13386,7 +13386,7 @@ pub fn _mm_mask_cvt_roundss_sh<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtss2sh, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cvt_roundss_sh<const ROUNDING: i32>(
k: __mmask8,
a: __m128h,
@@ -13403,7 +13403,7 @@ pub fn _mm_maskz_cvt_roundss_sh<const ROUNDING: i32>(
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtpd2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvtpd_ph(a: __m128d) -> __m128h {
_mm_mask_cvtpd_ph(_mm_setzero_ph(), 0xff, a)
}
@@ -13416,7 +13416,7 @@ pub fn _mm_cvtpd_ph(a: __m128d) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtpd2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cvtpd_ph(src: __m128h, k: __mmask8, a: __m128d) -> __m128h {
unsafe { vcvtpd2ph_128(a, src, k) }
}
@@ -13429,7 +13429,7 @@ pub fn _mm_mask_cvtpd_ph(src: __m128h, k: __mmask8, a: __m128d) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtpd2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cvtpd_ph(k: __mmask8, a: __m128d) -> __m128h {
_mm_mask_cvtpd_ph(_mm_setzero_ph(), k, a)
}
@@ -13441,7 +13441,7 @@ pub fn _mm_maskz_cvtpd_ph(k: __mmask8, a: __m128d) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtpd2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_cvtpd_ph(a: __m256d) -> __m128h {
_mm256_mask_cvtpd_ph(_mm_setzero_ph(), 0xff, a)
}
@@ -13454,7 +13454,7 @@ pub fn _mm256_cvtpd_ph(a: __m256d) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtpd2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_cvtpd_ph(src: __m128h, k: __mmask8, a: __m256d) -> __m128h {
unsafe { vcvtpd2ph_256(a, src, k) }
}
@@ -13467,7 +13467,7 @@ pub fn _mm256_mask_cvtpd_ph(src: __m128h, k: __mmask8, a: __m256d) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtpd2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_cvtpd_ph(k: __mmask8, a: __m256d) -> __m128h {
_mm256_mask_cvtpd_ph(_mm_setzero_ph(), k, a)
}
@@ -13479,7 +13479,7 @@ pub fn _mm256_maskz_cvtpd_ph(k: __mmask8, a: __m256d) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtpd2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvtpd_ph(a: __m512d) -> __m128h {
_mm512_mask_cvtpd_ph(f16x8::ZERO.as_m128h(), 0xff, a)
}
@@ -13492,7 +13492,7 @@ pub fn _mm512_cvtpd_ph(a: __m512d) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtpd2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvtpd_ph(src: __m128h, k: __mmask8, a: __m512d) -> __m128h {
unsafe { vcvtpd2ph_512(a, src, k, _MM_FROUND_CUR_DIRECTION) }
}
@@ -13505,7 +13505,7 @@ pub fn _mm512_mask_cvtpd_ph(src: __m128h, k: __mmask8, a: __m512d) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtpd2ph))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvtpd_ph(k: __mmask8, a: __m512d) -> __m128h {
_mm512_mask_cvtpd_ph(f16x8::ZERO.as_m128h(), k, a)
}
@@ -13526,7 +13526,7 @@ pub fn _mm512_maskz_cvtpd_ph(k: __mmask8, a: __m512d) -> __m128h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtpd2ph, ROUNDING = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvt_roundpd_ph<const ROUNDING: i32>(a: __m512d) -> __m128h {
static_assert_rounding!(ROUNDING);
_mm512_mask_cvt_roundpd_ph::<ROUNDING>(f16x8::ZERO.as_m128h(), 0xff, a)
@@ -13549,7 +13549,7 @@ pub fn _mm512_cvt_roundpd_ph<const ROUNDING: i32>(a: __m512d) -> __m128h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtpd2ph, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvt_roundpd_ph<const ROUNDING: i32>(
src: __m128h,
k: __mmask8,
@@ -13578,7 +13578,7 @@ pub fn _mm512_mask_cvt_roundpd_ph<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtpd2ph, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvt_roundpd_ph<const ROUNDING: i32>(k: __mmask8, a: __m512d) -> __m128h {
static_assert_rounding!(ROUNDING);
_mm512_mask_cvt_roundpd_ph::<ROUNDING>(f16x8::ZERO.as_m128h(), k, a)
@@ -13592,7 +13592,7 @@ pub fn _mm512_maskz_cvt_roundpd_ph<const ROUNDING: i32>(k: __mmask8, a: __m512d)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtsd2sh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvtsd_sh(a: __m128h, b: __m128d) -> __m128h {
_mm_mask_cvtsd_sh(f16x8::ZERO.as_m128h(), 0xff, a, b)
}
@@ -13606,7 +13606,7 @@ pub fn _mm_cvtsd_sh(a: __m128h, b: __m128d) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtsd2sh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cvtsd_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128d) -> __m128h {
unsafe { vcvtsd2sh(a, b, src, k, _MM_FROUND_CUR_DIRECTION) }
}
@@ -13620,7 +13620,7 @@ pub fn _mm_mask_cvtsd_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128d) -> _
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtsd2sh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cvtsd_sh(k: __mmask8, a: __m128h, b: __m128d) -> __m128h {
_mm_mask_cvtsd_sh(f16x8::ZERO.as_m128h(), k, a, b)
}
@@ -13642,7 +13642,7 @@ pub fn _mm_maskz_cvtsd_sh(k: __mmask8, a: __m128h, b: __m128d) -> __m128h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtsd2sh, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvt_roundsd_sh<const ROUNDING: i32>(a: __m128h, b: __m128d) -> __m128h {
static_assert_rounding!(ROUNDING);
_mm_mask_cvt_roundsd_sh::<ROUNDING>(f16x8::ZERO.as_m128h(), 0xff, a, b)
@@ -13666,7 +13666,7 @@ pub fn _mm_cvt_roundsd_sh<const ROUNDING: i32>(a: __m128h, b: __m128d) -> __m128
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtsd2sh, ROUNDING = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cvt_roundsd_sh<const ROUNDING: i32>(
src: __m128h,
k: __mmask8,
@@ -13697,7 +13697,7 @@ pub fn _mm_mask_cvt_roundsd_sh<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtsd2sh, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cvt_roundsd_sh<const ROUNDING: i32>(
k: __mmask8,
a: __m128h,
@@ -13714,7 +13714,7 @@ pub fn _mm_maskz_cvt_roundsd_sh<const ROUNDING: i32>(
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2w))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvtph_epi16(a: __m128h) -> __m128i {
_mm_mask_cvtph_epi16(_mm_undefined_si128(), 0xff, a)
}
@@ -13727,7 +13727,7 @@ pub fn _mm_cvtph_epi16(a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2w))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cvtph_epi16(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
unsafe { transmute(vcvtph2w_128(a, src.as_i16x8(), k)) }
}
@@ -13739,7 +13739,7 @@ pub fn _mm_mask_cvtph_epi16(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2w))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cvtph_epi16(k: __mmask8, a: __m128h) -> __m128i {
_mm_mask_cvtph_epi16(_mm_setzero_si128(), k, a)
}
@@ -13751,7 +13751,7 @@ pub fn _mm_maskz_cvtph_epi16(k: __mmask8, a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2w))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_cvtph_epi16(a: __m256h) -> __m256i {
_mm256_mask_cvtph_epi16(_mm256_undefined_si256(), 0xffff, a)
}
@@ -13764,7 +13764,7 @@ pub fn _mm256_cvtph_epi16(a: __m256h) -> __m256i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2w))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_cvtph_epi16(src: __m256i, k: __mmask16, a: __m256h) -> __m256i {
unsafe { transmute(vcvtph2w_256(a, src.as_i16x16(), k)) }
}
@@ -13776,7 +13776,7 @@ pub fn _mm256_mask_cvtph_epi16(src: __m256i, k: __mmask16, a: __m256h) -> __m256
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2w))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_cvtph_epi16(k: __mmask16, a: __m256h) -> __m256i {
_mm256_mask_cvtph_epi16(_mm256_setzero_si256(), k, a)
}
@@ -13788,7 +13788,7 @@ pub fn _mm256_maskz_cvtph_epi16(k: __mmask16, a: __m256h) -> __m256i {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2w))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvtph_epi16(a: __m512h) -> __m512i {
_mm512_mask_cvtph_epi16(_mm512_undefined_epi32(), 0xffffffff, a)
}
@@ -13801,7 +13801,7 @@ pub fn _mm512_cvtph_epi16(a: __m512h) -> __m512i {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2w))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvtph_epi16(src: __m512i, k: __mmask32, a: __m512h) -> __m512i {
unsafe {
transmute(vcvtph2w_512(
@@ -13820,7 +13820,7 @@ pub fn _mm512_mask_cvtph_epi16(src: __m512i, k: __mmask32, a: __m512h) -> __m512
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2w))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvtph_epi16(k: __mmask32, a: __m512h) -> __m512i {
_mm512_mask_cvtph_epi16(_mm512_setzero_si512(), k, a)
}
@@ -13841,7 +13841,7 @@ pub fn _mm512_maskz_cvtph_epi16(k: __mmask32, a: __m512h) -> __m512i {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2w, ROUNDING = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvt_roundph_epi16<const ROUNDING: i32>(a: __m512h) -> __m512i {
static_assert_rounding!(ROUNDING);
_mm512_mask_cvt_roundph_epi16::<ROUNDING>(_mm512_undefined_epi32(), 0xffffffff, a)
@@ -13864,7 +13864,7 @@ pub fn _mm512_cvt_roundph_epi16<const ROUNDING: i32>(a: __m512h) -> __m512i {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2w, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvt_roundph_epi16<const ROUNDING: i32>(
src: __m512i,
k: __mmask32,
@@ -13892,7 +13892,7 @@ pub fn _mm512_mask_cvt_roundph_epi16<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2w, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvt_roundph_epi16<const ROUNDING: i32>(k: __mmask32, a: __m512h) -> __m512i {
static_assert_rounding!(ROUNDING);
_mm512_mask_cvt_roundph_epi16::<ROUNDING>(_mm512_setzero_si512(), k, a)
@@ -13905,7 +13905,7 @@ pub fn _mm512_maskz_cvt_roundph_epi16<const ROUNDING: i32>(k: __mmask32, a: __m5
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2uw))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvtph_epu16(a: __m128h) -> __m128i {
_mm_mask_cvtph_epu16(_mm_undefined_si128(), 0xff, a)
}
@@ -13918,7 +13918,7 @@ pub fn _mm_cvtph_epu16(a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2uw))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cvtph_epu16(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
unsafe { transmute(vcvtph2uw_128(a, src.as_u16x8(), k)) }
}
@@ -13930,7 +13930,7 @@ pub fn _mm_mask_cvtph_epu16(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2uw))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cvtph_epu16(k: __mmask8, a: __m128h) -> __m128i {
_mm_mask_cvtph_epu16(_mm_setzero_si128(), k, a)
}
@@ -13942,7 +13942,7 @@ pub fn _mm_maskz_cvtph_epu16(k: __mmask8, a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2uw))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_cvtph_epu16(a: __m256h) -> __m256i {
_mm256_mask_cvtph_epu16(_mm256_undefined_si256(), 0xffff, a)
}
@@ -13955,7 +13955,7 @@ pub fn _mm256_cvtph_epu16(a: __m256h) -> __m256i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2uw))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_cvtph_epu16(src: __m256i, k: __mmask16, a: __m256h) -> __m256i {
unsafe { transmute(vcvtph2uw_256(a, src.as_u16x16(), k)) }
}
@@ -13967,7 +13967,7 @@ pub fn _mm256_mask_cvtph_epu16(src: __m256i, k: __mmask16, a: __m256h) -> __m256
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2uw))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_cvtph_epu16(k: __mmask16, a: __m256h) -> __m256i {
_mm256_mask_cvtph_epu16(_mm256_setzero_si256(), k, a)
}
@@ -13979,7 +13979,7 @@ pub fn _mm256_maskz_cvtph_epu16(k: __mmask16, a: __m256h) -> __m256i {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2uw))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvtph_epu16(a: __m512h) -> __m512i {
_mm512_mask_cvtph_epu16(_mm512_undefined_epi32(), 0xffffffff, a)
}
@@ -13992,7 +13992,7 @@ pub fn _mm512_cvtph_epu16(a: __m512h) -> __m512i {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2uw))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvtph_epu16(src: __m512i, k: __mmask32, a: __m512h) -> __m512i {
unsafe {
transmute(vcvtph2uw_512(
@@ -14011,7 +14011,7 @@ pub fn _mm512_mask_cvtph_epu16(src: __m512i, k: __mmask32, a: __m512h) -> __m512
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2uw))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvtph_epu16(k: __mmask32, a: __m512h) -> __m512i {
_mm512_mask_cvtph_epu16(_mm512_setzero_si512(), k, a)
}
@@ -14026,7 +14026,7 @@ pub fn _mm512_maskz_cvtph_epu16(k: __mmask32, a: __m512h) -> __m512i {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2uw, SAE = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvt_roundph_epu16<const SAE: i32>(a: __m512h) -> __m512i {
static_assert_sae!(SAE);
_mm512_mask_cvt_roundph_epu16::<SAE>(_mm512_undefined_epi32(), 0xffffffff, a)
@@ -14043,7 +14043,7 @@ pub fn _mm512_cvt_roundph_epu16<const SAE: i32>(a: __m512h) -> __m512i {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2uw, SAE = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvt_roundph_epu16<const SAE: i32>(
src: __m512i,
k: __mmask32,
@@ -14065,7 +14065,7 @@ pub fn _mm512_mask_cvt_roundph_epu16<const SAE: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2uw, SAE = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvt_roundph_epu16<const SAE: i32>(k: __mmask32, a: __m512h) -> __m512i {
static_assert_sae!(SAE);
_mm512_mask_cvt_roundph_epu16::<SAE>(_mm512_setzero_si512(), k, a)
@@ -14078,7 +14078,7 @@ pub fn _mm512_maskz_cvt_roundph_epu16<const SAE: i32>(k: __mmask32, a: __m512h)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2w))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvttph_epi16(a: __m128h) -> __m128i {
_mm_mask_cvttph_epi16(_mm_undefined_si128(), 0xff, a)
}
@@ -14091,7 +14091,7 @@ pub fn _mm_cvttph_epi16(a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2w))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cvttph_epi16(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
unsafe { transmute(vcvttph2w_128(a, src.as_i16x8(), k)) }
}
@@ -14104,7 +14104,7 @@ pub fn _mm_mask_cvttph_epi16(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2w))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cvttph_epi16(k: __mmask8, a: __m128h) -> __m128i {
_mm_mask_cvttph_epi16(_mm_setzero_si128(), k, a)
}
@@ -14116,7 +14116,7 @@ pub fn _mm_maskz_cvttph_epi16(k: __mmask8, a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2w))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_cvttph_epi16(a: __m256h) -> __m256i {
_mm256_mask_cvttph_epi16(_mm256_undefined_si256(), 0xffff, a)
}
@@ -14129,7 +14129,7 @@ pub fn _mm256_cvttph_epi16(a: __m256h) -> __m256i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2w))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_cvttph_epi16(src: __m256i, k: __mmask16, a: __m256h) -> __m256i {
unsafe { transmute(vcvttph2w_256(a, src.as_i16x16(), k)) }
}
@@ -14142,7 +14142,7 @@ pub fn _mm256_mask_cvttph_epi16(src: __m256i, k: __mmask16, a: __m256h) -> __m25
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2w))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_cvttph_epi16(k: __mmask16, a: __m256h) -> __m256i {
_mm256_mask_cvttph_epi16(_mm256_setzero_si256(), k, a)
}
@@ -14154,7 +14154,7 @@ pub fn _mm256_maskz_cvttph_epi16(k: __mmask16, a: __m256h) -> __m256i {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2w))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvttph_epi16(a: __m512h) -> __m512i {
_mm512_mask_cvttph_epi16(_mm512_undefined_epi32(), 0xffffffff, a)
}
@@ -14167,7 +14167,7 @@ pub fn _mm512_cvttph_epi16(a: __m512h) -> __m512i {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2w))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvttph_epi16(src: __m512i, k: __mmask32, a: __m512h) -> __m512i {
unsafe {
transmute(vcvttph2w_512(
@@ -14187,7 +14187,7 @@ pub fn _mm512_mask_cvttph_epi16(src: __m512i, k: __mmask32, a: __m512h) -> __m51
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2w))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvttph_epi16(k: __mmask32, a: __m512h) -> __m512i {
_mm512_mask_cvttph_epi16(_mm512_setzero_si512(), k, a)
}
@@ -14202,7 +14202,7 @@ pub fn _mm512_maskz_cvttph_epi16(k: __mmask32, a: __m512h) -> __m512i {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2w, SAE = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvtt_roundph_epi16<const SAE: i32>(a: __m512h) -> __m512i {
static_assert_sae!(SAE);
_mm512_mask_cvtt_roundph_epi16::<SAE>(_mm512_undefined_epi32(), 0xffffffff, a)
@@ -14219,7 +14219,7 @@ pub fn _mm512_cvtt_roundph_epi16<const SAE: i32>(a: __m512h) -> __m512i {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2w, SAE = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvtt_roundph_epi16<const SAE: i32>(
src: __m512i,
k: __mmask32,
@@ -14242,7 +14242,7 @@ pub fn _mm512_mask_cvtt_roundph_epi16<const SAE: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2w, SAE = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvtt_roundph_epi16<const SAE: i32>(k: __mmask32, a: __m512h) -> __m512i {
static_assert_sae!(SAE);
_mm512_mask_cvtt_roundph_epi16::<SAE>(_mm512_setzero_si512(), k, a)
@@ -14255,7 +14255,7 @@ pub fn _mm512_maskz_cvtt_roundph_epi16<const SAE: i32>(k: __mmask32, a: __m512h)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2uw))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvttph_epu16(a: __m128h) -> __m128i {
_mm_mask_cvttph_epu16(_mm_undefined_si128(), 0xff, a)
}
@@ -14268,7 +14268,7 @@ pub fn _mm_cvttph_epu16(a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2uw))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cvttph_epu16(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
unsafe { transmute(vcvttph2uw_128(a, src.as_u16x8(), k)) }
}
@@ -14281,7 +14281,7 @@ pub fn _mm_mask_cvttph_epu16(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2uw))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cvttph_epu16(k: __mmask8, a: __m128h) -> __m128i {
_mm_mask_cvttph_epu16(_mm_setzero_si128(), k, a)
}
@@ -14293,7 +14293,7 @@ pub fn _mm_maskz_cvttph_epu16(k: __mmask8, a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2uw))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_cvttph_epu16(a: __m256h) -> __m256i {
_mm256_mask_cvttph_epu16(_mm256_undefined_si256(), 0xffff, a)
}
@@ -14306,7 +14306,7 @@ pub fn _mm256_cvttph_epu16(a: __m256h) -> __m256i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2uw))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_cvttph_epu16(src: __m256i, k: __mmask16, a: __m256h) -> __m256i {
unsafe { transmute(vcvttph2uw_256(a, src.as_u16x16(), k)) }
}
@@ -14319,7 +14319,7 @@ pub fn _mm256_mask_cvttph_epu16(src: __m256i, k: __mmask16, a: __m256h) -> __m25
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2uw))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_cvttph_epu16(k: __mmask16, a: __m256h) -> __m256i {
_mm256_mask_cvttph_epu16(_mm256_setzero_si256(), k, a)
}
@@ -14331,7 +14331,7 @@ pub fn _mm256_maskz_cvttph_epu16(k: __mmask16, a: __m256h) -> __m256i {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2uw))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvttph_epu16(a: __m512h) -> __m512i {
_mm512_mask_cvttph_epu16(_mm512_undefined_epi32(), 0xffffffff, a)
}
@@ -14344,7 +14344,7 @@ pub fn _mm512_cvttph_epu16(a: __m512h) -> __m512i {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2uw))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvttph_epu16(src: __m512i, k: __mmask32, a: __m512h) -> __m512i {
unsafe {
transmute(vcvttph2uw_512(
@@ -14364,7 +14364,7 @@ pub fn _mm512_mask_cvttph_epu16(src: __m512i, k: __mmask32, a: __m512h) -> __m51
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2uw))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvttph_epu16(k: __mmask32, a: __m512h) -> __m512i {
_mm512_mask_cvttph_epu16(_mm512_setzero_si512(), k, a)
}
@@ -14379,7 +14379,7 @@ pub fn _mm512_maskz_cvttph_epu16(k: __mmask32, a: __m512h) -> __m512i {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2uw, SAE = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvtt_roundph_epu16<const SAE: i32>(a: __m512h) -> __m512i {
static_assert_sae!(SAE);
_mm512_mask_cvtt_roundph_epu16::<SAE>(_mm512_undefined_epi32(), 0xffffffff, a)
@@ -14396,7 +14396,7 @@ pub fn _mm512_cvtt_roundph_epu16<const SAE: i32>(a: __m512h) -> __m512i {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2uw, SAE = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvtt_roundph_epu16<const SAE: i32>(
src: __m512i,
k: __mmask32,
@@ -14419,7 +14419,7 @@ pub fn _mm512_mask_cvtt_roundph_epu16<const SAE: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2uw, SAE = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvtt_roundph_epu16<const SAE: i32>(k: __mmask32, a: __m512h) -> __m512i {
static_assert_sae!(SAE);
_mm512_mask_cvtt_roundph_epu16::<SAE>(_mm512_setzero_si512(), k, a)
@@ -14432,7 +14432,7 @@ pub fn _mm512_maskz_cvtt_roundph_epu16<const SAE: i32>(k: __mmask32, a: __m512h)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2dq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvtph_epi32(a: __m128h) -> __m128i {
_mm_mask_cvtph_epi32(_mm_undefined_si128(), 0xff, a)
}
@@ -14444,7 +14444,7 @@ pub fn _mm_cvtph_epi32(a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2dq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cvtph_epi32(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
unsafe { transmute(vcvtph2dq_128(a, src.as_i32x4(), k)) }
}
@@ -14456,7 +14456,7 @@ pub fn _mm_mask_cvtph_epi32(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2dq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cvtph_epi32(k: __mmask8, a: __m128h) -> __m128i {
_mm_mask_cvtph_epi32(_mm_setzero_si128(), k, a)
}
@@ -14468,7 +14468,7 @@ pub fn _mm_maskz_cvtph_epi32(k: __mmask8, a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2dq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_cvtph_epi32(a: __m128h) -> __m256i {
_mm256_mask_cvtph_epi32(_mm256_undefined_si256(), 0xff, a)
}
@@ -14480,7 +14480,7 @@ pub fn _mm256_cvtph_epi32(a: __m128h) -> __m256i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2dq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_cvtph_epi32(src: __m256i, k: __mmask8, a: __m128h) -> __m256i {
unsafe { transmute(vcvtph2dq_256(a, src.as_i32x8(), k)) }
}
@@ -14492,7 +14492,7 @@ pub fn _mm256_mask_cvtph_epi32(src: __m256i, k: __mmask8, a: __m128h) -> __m256i
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2dq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_cvtph_epi32(k: __mmask8, a: __m128h) -> __m256i {
_mm256_mask_cvtph_epi32(_mm256_setzero_si256(), k, a)
}
@@ -14504,7 +14504,7 @@ pub fn _mm256_maskz_cvtph_epi32(k: __mmask8, a: __m128h) -> __m256i {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2dq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvtph_epi32(a: __m256h) -> __m512i {
_mm512_mask_cvtph_epi32(_mm512_undefined_epi32(), 0xffff, a)
}
@@ -14516,7 +14516,7 @@ pub fn _mm512_cvtph_epi32(a: __m256h) -> __m512i {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2dq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvtph_epi32(src: __m512i, k: __mmask16, a: __m256h) -> __m512i {
unsafe {
transmute(vcvtph2dq_512(
@@ -14535,7 +14535,7 @@ pub fn _mm512_mask_cvtph_epi32(src: __m512i, k: __mmask16, a: __m256h) -> __m512
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2dq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvtph_epi32(k: __mmask16, a: __m256h) -> __m512i {
_mm512_mask_cvtph_epi32(_mm512_setzero_si512(), k, a)
}
@@ -14556,7 +14556,7 @@ pub fn _mm512_maskz_cvtph_epi32(k: __mmask16, a: __m256h) -> __m512i {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2dq, ROUNDING = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvt_roundph_epi32<const ROUNDING: i32>(a: __m256h) -> __m512i {
static_assert_rounding!(ROUNDING);
_mm512_mask_cvt_roundph_epi32::<ROUNDING>(_mm512_undefined_epi32(), 0xffff, a)
@@ -14578,7 +14578,7 @@ pub fn _mm512_cvt_roundph_epi32<const ROUNDING: i32>(a: __m256h) -> __m512i {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2dq, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvt_roundph_epi32<const ROUNDING: i32>(
src: __m512i,
k: __mmask16,
@@ -14606,7 +14606,7 @@ pub fn _mm512_mask_cvt_roundph_epi32<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2dq, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvt_roundph_epi32<const ROUNDING: i32>(k: __mmask16, a: __m256h) -> __m512i {
static_assert_rounding!(ROUNDING);
_mm512_mask_cvt_roundph_epi32::<ROUNDING>(_mm512_setzero_si512(), k, a)
@@ -14619,7 +14619,7 @@ pub fn _mm512_maskz_cvt_roundph_epi32<const ROUNDING: i32>(k: __mmask16, a: __m2
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtsh2si))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvtsh_i32(a: __m128h) -> i32 {
unsafe { vcvtsh2si32(a, _MM_FROUND_CUR_DIRECTION) }
}
@@ -14640,7 +14640,7 @@ pub fn _mm_cvtsh_i32(a: __m128h) -> i32 {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtsh2si, ROUNDING = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvt_roundsh_i32<const ROUNDING: i32>(a: __m128h) -> i32 {
unsafe {
static_assert_rounding!(ROUNDING);
@@ -14655,7 +14655,7 @@ pub fn _mm_cvt_roundsh_i32<const ROUNDING: i32>(a: __m128h) -> i32 {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2udq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvtph_epu32(a: __m128h) -> __m128i {
_mm_mask_cvtph_epu32(_mm_undefined_si128(), 0xff, a)
}
@@ -14667,7 +14667,7 @@ pub fn _mm_cvtph_epu32(a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2udq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cvtph_epu32(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
unsafe { transmute(vcvtph2udq_128(a, src.as_u32x4(), k)) }
}
@@ -14679,7 +14679,7 @@ pub fn _mm_mask_cvtph_epu32(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2udq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cvtph_epu32(k: __mmask8, a: __m128h) -> __m128i {
_mm_mask_cvtph_epu32(_mm_setzero_si128(), k, a)
}
@@ -14691,7 +14691,7 @@ pub fn _mm_maskz_cvtph_epu32(k: __mmask8, a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2udq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_cvtph_epu32(a: __m128h) -> __m256i {
_mm256_mask_cvtph_epu32(_mm256_undefined_si256(), 0xff, a)
}
@@ -14703,7 +14703,7 @@ pub fn _mm256_cvtph_epu32(a: __m128h) -> __m256i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2udq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_cvtph_epu32(src: __m256i, k: __mmask8, a: __m128h) -> __m256i {
unsafe { transmute(vcvtph2udq_256(a, src.as_u32x8(), k)) }
}
@@ -14715,7 +14715,7 @@ pub fn _mm256_mask_cvtph_epu32(src: __m256i, k: __mmask8, a: __m128h) -> __m256i
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2udq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_cvtph_epu32(k: __mmask8, a: __m128h) -> __m256i {
_mm256_mask_cvtph_epu32(_mm256_setzero_si256(), k, a)
}
@@ -14727,7 +14727,7 @@ pub fn _mm256_maskz_cvtph_epu32(k: __mmask8, a: __m128h) -> __m256i {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2udq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvtph_epu32(a: __m256h) -> __m512i {
_mm512_mask_cvtph_epu32(_mm512_undefined_epi32(), 0xffff, a)
}
@@ -14739,7 +14739,7 @@ pub fn _mm512_cvtph_epu32(a: __m256h) -> __m512i {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2udq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvtph_epu32(src: __m512i, k: __mmask16, a: __m256h) -> __m512i {
unsafe {
transmute(vcvtph2udq_512(
@@ -14758,7 +14758,7 @@ pub fn _mm512_mask_cvtph_epu32(src: __m512i, k: __mmask16, a: __m256h) -> __m512
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2udq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvtph_epu32(k: __mmask16, a: __m256h) -> __m512i {
_mm512_mask_cvtph_epu32(_mm512_setzero_si512(), k, a)
}
@@ -14779,7 +14779,7 @@ pub fn _mm512_maskz_cvtph_epu32(k: __mmask16, a: __m256h) -> __m512i {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2udq, ROUNDING = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvt_roundph_epu32<const ROUNDING: i32>(a: __m256h) -> __m512i {
static_assert_rounding!(ROUNDING);
_mm512_mask_cvt_roundph_epu32::<ROUNDING>(_mm512_undefined_epi32(), 0xffff, a)
@@ -14801,7 +14801,7 @@ pub fn _mm512_cvt_roundph_epu32<const ROUNDING: i32>(a: __m256h) -> __m512i {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2udq, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvt_roundph_epu32<const ROUNDING: i32>(
src: __m512i,
k: __mmask16,
@@ -14829,7 +14829,7 @@ pub fn _mm512_mask_cvt_roundph_epu32<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2udq, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvt_roundph_epu32<const ROUNDING: i32>(k: __mmask16, a: __m256h) -> __m512i {
static_assert_rounding!(ROUNDING);
_mm512_mask_cvt_roundph_epu32::<ROUNDING>(_mm512_setzero_si512(), k, a)
@@ -14842,7 +14842,7 @@ pub fn _mm512_maskz_cvt_roundph_epu32<const ROUNDING: i32>(k: __mmask16, a: __m2
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtsh2usi))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvtsh_u32(a: __m128h) -> u32 {
unsafe { vcvtsh2usi32(a, _MM_FROUND_CUR_DIRECTION) }
}
@@ -14857,7 +14857,7 @@ pub fn _mm_cvtsh_u32(a: __m128h) -> u32 {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtsh2usi, SAE = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvt_roundsh_u32<const SAE: i32>(a: __m128h) -> u32 {
unsafe {
static_assert_rounding!(SAE);
@@ -14872,7 +14872,7 @@ pub fn _mm_cvt_roundsh_u32<const SAE: i32>(a: __m128h) -> u32 {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2dq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvttph_epi32(a: __m128h) -> __m128i {
_mm_mask_cvttph_epi32(_mm_undefined_si128(), 0xff, a)
}
@@ -14884,7 +14884,7 @@ pub fn _mm_cvttph_epi32(a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2dq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cvttph_epi32(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
unsafe { transmute(vcvttph2dq_128(a, src.as_i32x4(), k)) }
}
@@ -14896,7 +14896,7 @@ pub fn _mm_mask_cvttph_epi32(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2dq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cvttph_epi32(k: __mmask8, a: __m128h) -> __m128i {
_mm_mask_cvttph_epi32(_mm_setzero_si128(), k, a)
}
@@ -14908,7 +14908,7 @@ pub fn _mm_maskz_cvttph_epi32(k: __mmask8, a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2dq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_cvttph_epi32(a: __m128h) -> __m256i {
_mm256_mask_cvttph_epi32(_mm256_undefined_si256(), 0xff, a)
}
@@ -14920,7 +14920,7 @@ pub fn _mm256_cvttph_epi32(a: __m128h) -> __m256i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2dq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_cvttph_epi32(src: __m256i, k: __mmask8, a: __m128h) -> __m256i {
unsafe { transmute(vcvttph2dq_256(a, src.as_i32x8(), k)) }
}
@@ -14932,7 +14932,7 @@ pub fn _mm256_mask_cvttph_epi32(src: __m256i, k: __mmask8, a: __m128h) -> __m256
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2dq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_cvttph_epi32(k: __mmask8, a: __m128h) -> __m256i {
_mm256_mask_cvttph_epi32(_mm256_setzero_si256(), k, a)
}
@@ -14944,7 +14944,7 @@ pub fn _mm256_maskz_cvttph_epi32(k: __mmask8, a: __m128h) -> __m256i {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2dq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvttph_epi32(a: __m256h) -> __m512i {
_mm512_mask_cvttph_epi32(_mm512_undefined_epi32(), 0xffff, a)
}
@@ -14956,7 +14956,7 @@ pub fn _mm512_cvttph_epi32(a: __m256h) -> __m512i {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2dq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvttph_epi32(src: __m512i, k: __mmask16, a: __m256h) -> __m512i {
unsafe {
transmute(vcvttph2dq_512(
@@ -14975,7 +14975,7 @@ pub fn _mm512_mask_cvttph_epi32(src: __m512i, k: __mmask16, a: __m256h) -> __m51
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2dq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvttph_epi32(k: __mmask16, a: __m256h) -> __m512i {
_mm512_mask_cvttph_epi32(_mm512_setzero_si512(), k, a)
}
@@ -14990,7 +14990,7 @@ pub fn _mm512_maskz_cvttph_epi32(k: __mmask16, a: __m256h) -> __m512i {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2dq, SAE = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvtt_roundph_epi32<const SAE: i32>(a: __m256h) -> __m512i {
static_assert_sae!(SAE);
_mm512_mask_cvtt_roundph_epi32::<SAE>(_mm512_undefined_epi32(), 0xffff, a)
@@ -15006,7 +15006,7 @@ pub fn _mm512_cvtt_roundph_epi32<const SAE: i32>(a: __m256h) -> __m512i {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2dq, SAE = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvtt_roundph_epi32<const SAE: i32>(
src: __m512i,
k: __mmask16,
@@ -15028,7 +15028,7 @@ pub fn _mm512_mask_cvtt_roundph_epi32<const SAE: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2dq, SAE = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvtt_roundph_epi32<const SAE: i32>(k: __mmask16, a: __m256h) -> __m512i {
static_assert_sae!(SAE);
_mm512_mask_cvtt_roundph_epi32::<SAE>(_mm512_setzero_si512(), k, a)
@@ -15041,7 +15041,7 @@ pub fn _mm512_maskz_cvtt_roundph_epi32<const SAE: i32>(k: __mmask16, a: __m256h)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttsh2si))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvttsh_i32(a: __m128h) -> i32 {
unsafe { vcvttsh2si32(a, _MM_FROUND_CUR_DIRECTION) }
}
@@ -15056,7 +15056,7 @@ pub fn _mm_cvttsh_i32(a: __m128h) -> i32 {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttsh2si, SAE = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvtt_roundsh_i32<const SAE: i32>(a: __m128h) -> i32 {
unsafe {
static_assert_sae!(SAE);
@@ -15071,7 +15071,7 @@ pub fn _mm_cvtt_roundsh_i32<const SAE: i32>(a: __m128h) -> i32 {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2udq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvttph_epu32(a: __m128h) -> __m128i {
_mm_mask_cvttph_epu32(_mm_undefined_si128(), 0xff, a)
}
@@ -15083,7 +15083,7 @@ pub fn _mm_cvttph_epu32(a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2udq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cvttph_epu32(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
unsafe { transmute(vcvttph2udq_128(a, src.as_u32x4(), k)) }
}
@@ -15095,7 +15095,7 @@ pub fn _mm_mask_cvttph_epu32(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2udq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cvttph_epu32(k: __mmask8, a: __m128h) -> __m128i {
_mm_mask_cvttph_epu32(_mm_setzero_si128(), k, a)
}
@@ -15107,7 +15107,7 @@ pub fn _mm_maskz_cvttph_epu32(k: __mmask8, a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2udq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_cvttph_epu32(a: __m128h) -> __m256i {
_mm256_mask_cvttph_epu32(_mm256_undefined_si256(), 0xff, a)
}
@@ -15119,7 +15119,7 @@ pub fn _mm256_cvttph_epu32(a: __m128h) -> __m256i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2udq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_cvttph_epu32(src: __m256i, k: __mmask8, a: __m128h) -> __m256i {
unsafe { transmute(vcvttph2udq_256(a, src.as_u32x8(), k)) }
}
@@ -15131,7 +15131,7 @@ pub fn _mm256_mask_cvttph_epu32(src: __m256i, k: __mmask8, a: __m128h) -> __m256
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2udq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_cvttph_epu32(k: __mmask8, a: __m128h) -> __m256i {
_mm256_mask_cvttph_epu32(_mm256_setzero_si256(), k, a)
}
@@ -15143,7 +15143,7 @@ pub fn _mm256_maskz_cvttph_epu32(k: __mmask8, a: __m128h) -> __m256i {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2udq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvttph_epu32(a: __m256h) -> __m512i {
_mm512_mask_cvttph_epu32(_mm512_undefined_epi32(), 0xffff, a)
}
@@ -15155,7 +15155,7 @@ pub fn _mm512_cvttph_epu32(a: __m256h) -> __m512i {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2udq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvttph_epu32(src: __m512i, k: __mmask16, a: __m256h) -> __m512i {
unsafe {
transmute(vcvttph2udq_512(
@@ -15174,7 +15174,7 @@ pub fn _mm512_mask_cvttph_epu32(src: __m512i, k: __mmask16, a: __m256h) -> __m51
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2udq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvttph_epu32(k: __mmask16, a: __m256h) -> __m512i {
_mm512_mask_cvttph_epu32(_mm512_setzero_si512(), k, a)
}
@@ -15189,7 +15189,7 @@ pub fn _mm512_maskz_cvttph_epu32(k: __mmask16, a: __m256h) -> __m512i {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2udq, SAE = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvtt_roundph_epu32<const SAE: i32>(a: __m256h) -> __m512i {
static_assert_sae!(SAE);
_mm512_mask_cvtt_roundph_epu32::<SAE>(_mm512_undefined_epi32(), 0xffff, a)
@@ -15205,7 +15205,7 @@ pub fn _mm512_cvtt_roundph_epu32<const SAE: i32>(a: __m256h) -> __m512i {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2udq, SAE = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvtt_roundph_epu32<const SAE: i32>(
src: __m512i,
k: __mmask16,
@@ -15227,7 +15227,7 @@ pub fn _mm512_mask_cvtt_roundph_epu32<const SAE: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2udq, SAE = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvtt_roundph_epu32<const SAE: i32>(k: __mmask16, a: __m256h) -> __m512i {
static_assert_sae!(SAE);
_mm512_mask_cvtt_roundph_epu32::<SAE>(_mm512_setzero_si512(), k, a)
@@ -15240,7 +15240,7 @@ pub fn _mm512_maskz_cvtt_roundph_epu32<const SAE: i32>(k: __mmask16, a: __m256h)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttsh2usi))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvttsh_u32(a: __m128h) -> u32 {
unsafe { vcvttsh2usi32(a, _MM_FROUND_CUR_DIRECTION) }
}
@@ -15255,7 +15255,7 @@ pub fn _mm_cvttsh_u32(a: __m128h) -> u32 {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttsh2usi, SAE = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvtt_roundsh_u32<const SAE: i32>(a: __m128h) -> u32 {
unsafe {
static_assert_sae!(SAE);
@@ -15270,7 +15270,7 @@ pub fn _mm_cvtt_roundsh_u32<const SAE: i32>(a: __m128h) -> u32 {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2qq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvtph_epi64(a: __m128h) -> __m128i {
_mm_mask_cvtph_epi64(_mm_undefined_si128(), 0xff, a)
}
@@ -15282,7 +15282,7 @@ pub fn _mm_cvtph_epi64(a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2qq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cvtph_epi64(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
unsafe { transmute(vcvtph2qq_128(a, src.as_i64x2(), k)) }
}
@@ -15294,7 +15294,7 @@ pub fn _mm_mask_cvtph_epi64(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2qq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cvtph_epi64(k: __mmask8, a: __m128h) -> __m128i {
_mm_mask_cvtph_epi64(_mm_setzero_si128(), k, a)
}
@@ -15306,7 +15306,7 @@ pub fn _mm_maskz_cvtph_epi64(k: __mmask8, a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2qq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_cvtph_epi64(a: __m128h) -> __m256i {
_mm256_mask_cvtph_epi64(_mm256_undefined_si256(), 0xff, a)
}
@@ -15318,7 +15318,7 @@ pub fn _mm256_cvtph_epi64(a: __m128h) -> __m256i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2qq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_cvtph_epi64(src: __m256i, k: __mmask8, a: __m128h) -> __m256i {
unsafe { transmute(vcvtph2qq_256(a, src.as_i64x4(), k)) }
}
@@ -15330,7 +15330,7 @@ pub fn _mm256_mask_cvtph_epi64(src: __m256i, k: __mmask8, a: __m128h) -> __m256i
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2qq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_cvtph_epi64(k: __mmask8, a: __m128h) -> __m256i {
_mm256_mask_cvtph_epi64(_mm256_setzero_si256(), k, a)
}
@@ -15342,7 +15342,7 @@ pub fn _mm256_maskz_cvtph_epi64(k: __mmask8, a: __m128h) -> __m256i {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2qq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvtph_epi64(a: __m128h) -> __m512i {
_mm512_mask_cvtph_epi64(_mm512_undefined_epi32(), 0xff, a)
}
@@ -15354,7 +15354,7 @@ pub fn _mm512_cvtph_epi64(a: __m128h) -> __m512i {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2qq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvtph_epi64(src: __m512i, k: __mmask8, a: __m128h) -> __m512i {
unsafe {
transmute(vcvtph2qq_512(
@@ -15373,7 +15373,7 @@ pub fn _mm512_mask_cvtph_epi64(src: __m512i, k: __mmask8, a: __m128h) -> __m512i
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2qq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvtph_epi64(k: __mmask8, a: __m128h) -> __m512i {
_mm512_mask_cvtph_epi64(_mm512_setzero_si512(), k, a)
}
@@ -15394,7 +15394,7 @@ pub fn _mm512_maskz_cvtph_epi64(k: __mmask8, a: __m128h) -> __m512i {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2qq, ROUNDING = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvt_roundph_epi64<const ROUNDING: i32>(a: __m128h) -> __m512i {
static_assert_rounding!(ROUNDING);
_mm512_mask_cvt_roundph_epi64::<ROUNDING>(_mm512_undefined_epi32(), 0xff, a)
@@ -15416,7 +15416,7 @@ pub fn _mm512_cvt_roundph_epi64<const ROUNDING: i32>(a: __m128h) -> __m512i {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2qq, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvt_roundph_epi64<const ROUNDING: i32>(
src: __m512i,
k: __mmask8,
@@ -15444,7 +15444,7 @@ pub fn _mm512_mask_cvt_roundph_epi64<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2qq, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvt_roundph_epi64<const ROUNDING: i32>(k: __mmask8, a: __m128h) -> __m512i {
static_assert_rounding!(ROUNDING);
_mm512_mask_cvt_roundph_epi64::<ROUNDING>(_mm512_setzero_si512(), k, a)
@@ -15457,7 +15457,7 @@ pub fn _mm512_maskz_cvt_roundph_epi64<const ROUNDING: i32>(k: __mmask8, a: __m12
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2uqq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvtph_epu64(a: __m128h) -> __m128i {
_mm_mask_cvtph_epu64(_mm_undefined_si128(), 0xff, a)
}
@@ -15469,7 +15469,7 @@ pub fn _mm_cvtph_epu64(a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2uqq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cvtph_epu64(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
unsafe { transmute(vcvtph2uqq_128(a, src.as_u64x2(), k)) }
}
@@ -15481,7 +15481,7 @@ pub fn _mm_mask_cvtph_epu64(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2uqq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cvtph_epu64(k: __mmask8, a: __m128h) -> __m128i {
_mm_mask_cvtph_epu64(_mm_setzero_si128(), k, a)
}
@@ -15493,7 +15493,7 @@ pub fn _mm_maskz_cvtph_epu64(k: __mmask8, a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2uqq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_cvtph_epu64(a: __m128h) -> __m256i {
_mm256_mask_cvtph_epu64(_mm256_undefined_si256(), 0xff, a)
}
@@ -15505,7 +15505,7 @@ pub fn _mm256_cvtph_epu64(a: __m128h) -> __m256i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2uqq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_cvtph_epu64(src: __m256i, k: __mmask8, a: __m128h) -> __m256i {
unsafe { transmute(vcvtph2uqq_256(a, src.as_u64x4(), k)) }
}
@@ -15517,7 +15517,7 @@ pub fn _mm256_mask_cvtph_epu64(src: __m256i, k: __mmask8, a: __m128h) -> __m256i
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2uqq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_cvtph_epu64(k: __mmask8, a: __m128h) -> __m256i {
_mm256_mask_cvtph_epu64(_mm256_setzero_si256(), k, a)
}
@@ -15529,7 +15529,7 @@ pub fn _mm256_maskz_cvtph_epu64(k: __mmask8, a: __m128h) -> __m256i {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2uqq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvtph_epu64(a: __m128h) -> __m512i {
_mm512_mask_cvtph_epu64(_mm512_undefined_epi32(), 0xff, a)
}
@@ -15541,7 +15541,7 @@ pub fn _mm512_cvtph_epu64(a: __m128h) -> __m512i {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2uqq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvtph_epu64(src: __m512i, k: __mmask8, a: __m128h) -> __m512i {
unsafe {
transmute(vcvtph2uqq_512(
@@ -15560,7 +15560,7 @@ pub fn _mm512_mask_cvtph_epu64(src: __m512i, k: __mmask8, a: __m128h) -> __m512i
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2uqq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvtph_epu64(k: __mmask8, a: __m128h) -> __m512i {
_mm512_mask_cvtph_epu64(_mm512_setzero_si512(), k, a)
}
@@ -15581,7 +15581,7 @@ pub fn _mm512_maskz_cvtph_epu64(k: __mmask8, a: __m128h) -> __m512i {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2uqq, ROUNDING = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvt_roundph_epu64<const ROUNDING: i32>(a: __m128h) -> __m512i {
static_assert_rounding!(ROUNDING);
_mm512_mask_cvt_roundph_epu64::<ROUNDING>(_mm512_undefined_epi32(), 0xff, a)
@@ -15603,7 +15603,7 @@ pub fn _mm512_cvt_roundph_epu64<const ROUNDING: i32>(a: __m128h) -> __m512i {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2uqq, ROUNDING = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvt_roundph_epu64<const ROUNDING: i32>(
src: __m512i,
k: __mmask8,
@@ -15631,7 +15631,7 @@ pub fn _mm512_mask_cvt_roundph_epu64<const ROUNDING: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2uqq, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvt_roundph_epu64<const ROUNDING: i32>(k: __mmask8, a: __m128h) -> __m512i {
static_assert_rounding!(ROUNDING);
_mm512_mask_cvt_roundph_epu64::<ROUNDING>(_mm512_setzero_si512(), k, a)
@@ -15644,7 +15644,7 @@ pub fn _mm512_maskz_cvt_roundph_epu64<const ROUNDING: i32>(k: __mmask8, a: __m12
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2qq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvttph_epi64(a: __m128h) -> __m128i {
_mm_mask_cvttph_epi64(_mm_undefined_si128(), 0xff, a)
}
@@ -15656,7 +15656,7 @@ pub fn _mm_cvttph_epi64(a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2qq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cvttph_epi64(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
unsafe { transmute(vcvttph2qq_128(a, src.as_i64x2(), k)) }
}
@@ -15668,7 +15668,7 @@ pub fn _mm_mask_cvttph_epi64(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2qq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cvttph_epi64(k: __mmask8, a: __m128h) -> __m128i {
_mm_mask_cvttph_epi64(_mm_setzero_si128(), k, a)
}
@@ -15680,7 +15680,7 @@ pub fn _mm_maskz_cvttph_epi64(k: __mmask8, a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2qq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_cvttph_epi64(a: __m128h) -> __m256i {
_mm256_mask_cvttph_epi64(_mm256_undefined_si256(), 0xff, a)
}
@@ -15692,7 +15692,7 @@ pub fn _mm256_cvttph_epi64(a: __m128h) -> __m256i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2qq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_cvttph_epi64(src: __m256i, k: __mmask8, a: __m128h) -> __m256i {
unsafe { transmute(vcvttph2qq_256(a, src.as_i64x4(), k)) }
}
@@ -15704,7 +15704,7 @@ pub fn _mm256_mask_cvttph_epi64(src: __m256i, k: __mmask8, a: __m128h) -> __m256
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2qq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_cvttph_epi64(k: __mmask8, a: __m128h) -> __m256i {
_mm256_mask_cvttph_epi64(_mm256_setzero_si256(), k, a)
}
@@ -15716,7 +15716,7 @@ pub fn _mm256_maskz_cvttph_epi64(k: __mmask8, a: __m128h) -> __m256i {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2qq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvttph_epi64(a: __m128h) -> __m512i {
_mm512_mask_cvttph_epi64(_mm512_undefined_epi32(), 0xff, a)
}
@@ -15728,7 +15728,7 @@ pub fn _mm512_cvttph_epi64(a: __m128h) -> __m512i {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2qq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvttph_epi64(src: __m512i, k: __mmask8, a: __m128h) -> __m512i {
unsafe {
transmute(vcvttph2qq_512(
@@ -15747,7 +15747,7 @@ pub fn _mm512_mask_cvttph_epi64(src: __m512i, k: __mmask8, a: __m128h) -> __m512
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2qq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvttph_epi64(k: __mmask8, a: __m128h) -> __m512i {
_mm512_mask_cvttph_epi64(_mm512_setzero_si512(), k, a)
}
@@ -15762,7 +15762,7 @@ pub fn _mm512_maskz_cvttph_epi64(k: __mmask8, a: __m128h) -> __m512i {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2qq, SAE = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvtt_roundph_epi64<const SAE: i32>(a: __m128h) -> __m512i {
static_assert_sae!(SAE);
_mm512_mask_cvtt_roundph_epi64::<SAE>(_mm512_undefined_epi32(), 0xff, a)
@@ -15778,7 +15778,7 @@ pub fn _mm512_cvtt_roundph_epi64<const SAE: i32>(a: __m128h) -> __m512i {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2qq, SAE = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvtt_roundph_epi64<const SAE: i32>(
src: __m512i,
k: __mmask8,
@@ -15800,7 +15800,7 @@ pub fn _mm512_mask_cvtt_roundph_epi64<const SAE: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2qq, SAE = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvtt_roundph_epi64<const SAE: i32>(k: __mmask8, a: __m128h) -> __m512i {
static_assert_sae!(SAE);
_mm512_mask_cvtt_roundph_epi64::<SAE>(_mm512_setzero_si512(), k, a)
@@ -15813,7 +15813,7 @@ pub fn _mm512_maskz_cvtt_roundph_epi64<const SAE: i32>(k: __mmask8, a: __m128h)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2uqq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvttph_epu64(a: __m128h) -> __m128i {
_mm_mask_cvttph_epu64(_mm_undefined_si128(), 0xff, a)
}
@@ -15825,7 +15825,7 @@ pub fn _mm_cvttph_epu64(a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2uqq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cvttph_epu64(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
unsafe { transmute(vcvttph2uqq_128(a, src.as_u64x2(), k)) }
}
@@ -15837,7 +15837,7 @@ pub fn _mm_mask_cvttph_epu64(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2uqq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cvttph_epu64(k: __mmask8, a: __m128h) -> __m128i {
_mm_mask_cvttph_epu64(_mm_setzero_si128(), k, a)
}
@@ -15849,7 +15849,7 @@ pub fn _mm_maskz_cvttph_epu64(k: __mmask8, a: __m128h) -> __m128i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2uqq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_cvttph_epu64(a: __m128h) -> __m256i {
_mm256_mask_cvttph_epu64(_mm256_undefined_si256(), 0xff, a)
}
@@ -15861,7 +15861,7 @@ pub fn _mm256_cvttph_epu64(a: __m128h) -> __m256i {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2uqq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_cvttph_epu64(src: __m256i, k: __mmask8, a: __m128h) -> __m256i {
unsafe { transmute(vcvttph2uqq_256(a, src.as_u64x4(), k)) }
}
@@ -15873,7 +15873,7 @@ pub fn _mm256_mask_cvttph_epu64(src: __m256i, k: __mmask8, a: __m128h) -> __m256
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttph2uqq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_cvttph_epu64(k: __mmask8, a: __m128h) -> __m256i {
_mm256_mask_cvttph_epu64(_mm256_setzero_si256(), k, a)
}
@@ -15885,7 +15885,7 @@ pub fn _mm256_maskz_cvttph_epu64(k: __mmask8, a: __m128h) -> __m256i {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2uqq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvttph_epu64(a: __m128h) -> __m512i {
_mm512_mask_cvttph_epu64(_mm512_undefined_epi32(), 0xff, a)
}
@@ -15897,7 +15897,7 @@ pub fn _mm512_cvttph_epu64(a: __m128h) -> __m512i {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2uqq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvttph_epu64(src: __m512i, k: __mmask8, a: __m128h) -> __m512i {
unsafe {
transmute(vcvttph2uqq_512(
@@ -15916,7 +15916,7 @@ pub fn _mm512_mask_cvttph_epu64(src: __m512i, k: __mmask8, a: __m128h) -> __m512
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2uqq))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvttph_epu64(k: __mmask8, a: __m128h) -> __m512i {
_mm512_mask_cvttph_epu64(_mm512_setzero_si512(), k, a)
}
@@ -15931,7 +15931,7 @@ pub fn _mm512_maskz_cvttph_epu64(k: __mmask8, a: __m128h) -> __m512i {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2uqq, SAE = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvtt_roundph_epu64<const SAE: i32>(a: __m128h) -> __m512i {
static_assert_sae!(SAE);
_mm512_mask_cvtt_roundph_epu64::<SAE>(_mm512_undefined_epi32(), 0xff, a)
@@ -15947,7 +15947,7 @@ pub fn _mm512_cvtt_roundph_epu64<const SAE: i32>(a: __m128h) -> __m512i {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2uqq, SAE = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvtt_roundph_epu64<const SAE: i32>(
src: __m512i,
k: __mmask8,
@@ -15969,7 +15969,7 @@ pub fn _mm512_mask_cvtt_roundph_epu64<const SAE: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttph2uqq, SAE = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvtt_roundph_epu64<const SAE: i32>(k: __mmask8, a: __m128h) -> __m512i {
static_assert_sae!(SAE);
_mm512_mask_cvtt_roundph_epu64::<SAE>(_mm512_setzero_si512(), k, a)
@@ -15982,7 +15982,7 @@ pub fn _mm512_maskz_cvtt_roundph_epu64<const SAE: i32>(k: __mmask8, a: __m128h)
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2psx))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvtxph_ps(a: __m128h) -> __m128 {
_mm_mask_cvtxph_ps(_mm_setzero_ps(), 0xff, a)
}
@@ -15995,7 +15995,7 @@ pub fn _mm_cvtxph_ps(a: __m128h) -> __m128 {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2psx))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cvtxph_ps(src: __m128, k: __mmask8, a: __m128h) -> __m128 {
unsafe { vcvtph2psx_128(a, src, k) }
}
@@ -16008,7 +16008,7 @@ pub fn _mm_mask_cvtxph_ps(src: __m128, k: __mmask8, a: __m128h) -> __m128 {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2psx))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cvtxph_ps(k: __mmask8, a: __m128h) -> __m128 {
_mm_mask_cvtxph_ps(_mm_setzero_ps(), k, a)
}
@@ -16020,7 +16020,7 @@ pub fn _mm_maskz_cvtxph_ps(k: __mmask8, a: __m128h) -> __m128 {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2psx))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_cvtxph_ps(a: __m128h) -> __m256 {
_mm256_mask_cvtxph_ps(_mm256_setzero_ps(), 0xff, a)
}
@@ -16033,7 +16033,7 @@ pub fn _mm256_cvtxph_ps(a: __m128h) -> __m256 {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2psx))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_cvtxph_ps(src: __m256, k: __mmask8, a: __m128h) -> __m256 {
unsafe { vcvtph2psx_256(a, src, k) }
}
@@ -16046,7 +16046,7 @@ pub fn _mm256_mask_cvtxph_ps(src: __m256, k: __mmask8, a: __m128h) -> __m256 {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2psx))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_cvtxph_ps(k: __mmask8, a: __m128h) -> __m256 {
_mm256_mask_cvtxph_ps(_mm256_setzero_ps(), k, a)
}
@@ -16058,7 +16058,7 @@ pub fn _mm256_maskz_cvtxph_ps(k: __mmask8, a: __m128h) -> __m256 {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2psx))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvtxph_ps(a: __m256h) -> __m512 {
_mm512_mask_cvtxph_ps(_mm512_setzero_ps(), 0xffff, a)
}
@@ -16071,7 +16071,7 @@ pub fn _mm512_cvtxph_ps(a: __m256h) -> __m512 {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2psx))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvtxph_ps(src: __m512, k: __mmask16, a: __m256h) -> __m512 {
unsafe { vcvtph2psx_512(a, src, k, _MM_FROUND_CUR_DIRECTION) }
}
@@ -16084,7 +16084,7 @@ pub fn _mm512_mask_cvtxph_ps(src: __m512, k: __mmask16, a: __m256h) -> __m512 {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2psx))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvtxph_ps(k: __mmask16, a: __m256h) -> __m512 {
_mm512_mask_cvtxph_ps(_mm512_setzero_ps(), k, a)
}
@@ -16099,7 +16099,7 @@ pub fn _mm512_maskz_cvtxph_ps(k: __mmask16, a: __m256h) -> __m512 {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2psx, SAE = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvtx_roundph_ps<const SAE: i32>(a: __m256h) -> __m512 {
static_assert_sae!(SAE);
_mm512_mask_cvtx_roundph_ps::<SAE>(_mm512_setzero_ps(), 0xffff, a)
@@ -16116,7 +16116,7 @@ pub fn _mm512_cvtx_roundph_ps<const SAE: i32>(a: __m256h) -> __m512 {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2psx, SAE = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvtx_roundph_ps<const SAE: i32>(
src: __m512,
k: __mmask16,
@@ -16139,7 +16139,7 @@ pub fn _mm512_mask_cvtx_roundph_ps<const SAE: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2psx, SAE = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvtx_roundph_ps<const SAE: i32>(k: __mmask16, a: __m256h) -> __m512 {
static_assert_sae!(SAE);
_mm512_mask_cvtx_roundph_ps::<SAE>(_mm512_setzero_ps(), k, a)
@@ -16153,7 +16153,7 @@ pub fn _mm512_maskz_cvtx_roundph_ps<const SAE: i32>(k: __mmask16, a: __m256h) ->
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtsh2ss))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvtsh_ss(a: __m128, b: __m128h) -> __m128 {
_mm_mask_cvtsh_ss(a, 0xff, a, b)
}
@@ -16167,7 +16167,7 @@ pub fn _mm_cvtsh_ss(a: __m128, b: __m128h) -> __m128 {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtsh2ss))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cvtsh_ss(src: __m128, k: __mmask8, a: __m128, b: __m128h) -> __m128 {
unsafe { vcvtsh2ss(a, b, src, k, _MM_FROUND_CUR_DIRECTION) }
}
@@ -16181,7 +16181,7 @@ pub fn _mm_mask_cvtsh_ss(src: __m128, k: __mmask8, a: __m128, b: __m128h) -> __m
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtsh2ss))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cvtsh_ss(k: __mmask8, a: __m128, b: __m128h) -> __m128 {
_mm_mask_cvtsh_ss(_mm_set_ss(0.0), k, a, b)
}
@@ -16197,7 +16197,7 @@ pub fn _mm_maskz_cvtsh_ss(k: __mmask8, a: __m128, b: __m128h) -> __m128 {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtsh2ss, SAE = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvt_roundsh_ss<const SAE: i32>(a: __m128, b: __m128h) -> __m128 {
static_assert_sae!(SAE);
_mm_mask_cvt_roundsh_ss::<SAE>(_mm_undefined_ps(), 0xff, a, b)
@@ -16215,7 +16215,7 @@ pub fn _mm_cvt_roundsh_ss<const SAE: i32>(a: __m128, b: __m128h) -> __m128 {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtsh2ss, SAE = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cvt_roundsh_ss<const SAE: i32>(
src: __m128,
k: __mmask8,
@@ -16240,7 +16240,7 @@ pub fn _mm_mask_cvt_roundsh_ss<const SAE: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtsh2ss, SAE = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cvt_roundsh_ss<const SAE: i32>(k: __mmask8, a: __m128, b: __m128h) -> __m128 {
static_assert_sae!(SAE);
_mm_mask_cvt_roundsh_ss::<SAE>(_mm_set_ss(0.0), k, a, b)
@@ -16253,7 +16253,7 @@ pub fn _mm_maskz_cvt_roundsh_ss<const SAE: i32>(k: __mmask8, a: __m128, b: __m12
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2pd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvtph_pd(a: __m128h) -> __m128d {
_mm_mask_cvtph_pd(_mm_setzero_pd(), 0xff, a)
}
@@ -16266,7 +16266,7 @@ pub fn _mm_cvtph_pd(a: __m128h) -> __m128d {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2pd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cvtph_pd(src: __m128d, k: __mmask8, a: __m128h) -> __m128d {
unsafe { vcvtph2pd_128(a, src, k) }
}
@@ -16279,7 +16279,7 @@ pub fn _mm_mask_cvtph_pd(src: __m128d, k: __mmask8, a: __m128h) -> __m128d {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2pd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cvtph_pd(k: __mmask8, a: __m128h) -> __m128d {
_mm_mask_cvtph_pd(_mm_setzero_pd(), k, a)
}
@@ -16291,7 +16291,7 @@ pub fn _mm_maskz_cvtph_pd(k: __mmask8, a: __m128h) -> __m128d {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2pd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_cvtph_pd(a: __m128h) -> __m256d {
_mm256_mask_cvtph_pd(_mm256_setzero_pd(), 0xff, a)
}
@@ -16304,7 +16304,7 @@ pub fn _mm256_cvtph_pd(a: __m128h) -> __m256d {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2pd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_mask_cvtph_pd(src: __m256d, k: __mmask8, a: __m128h) -> __m256d {
unsafe { vcvtph2pd_256(a, src, k) }
}
@@ -16317,7 +16317,7 @@ pub fn _mm256_mask_cvtph_pd(src: __m256d, k: __mmask8, a: __m128h) -> __m256d {
#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2pd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm256_maskz_cvtph_pd(k: __mmask8, a: __m128h) -> __m256d {
_mm256_mask_cvtph_pd(_mm256_setzero_pd(), k, a)
}
@@ -16329,7 +16329,7 @@ pub fn _mm256_maskz_cvtph_pd(k: __mmask8, a: __m128h) -> __m256d {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2pd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvtph_pd(a: __m128h) -> __m512d {
_mm512_mask_cvtph_pd(_mm512_setzero_pd(), 0xff, a)
}
@@ -16342,7 +16342,7 @@ pub fn _mm512_cvtph_pd(a: __m128h) -> __m512d {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2pd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvtph_pd(src: __m512d, k: __mmask8, a: __m128h) -> __m512d {
unsafe { vcvtph2pd_512(a, src, k, _MM_FROUND_CUR_DIRECTION) }
}
@@ -16355,7 +16355,7 @@ pub fn _mm512_mask_cvtph_pd(src: __m512d, k: __mmask8, a: __m128h) -> __m512d {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2pd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvtph_pd(k: __mmask8, a: __m128h) -> __m512d {
_mm512_mask_cvtph_pd(_mm512_setzero_pd(), k, a)
}
@@ -16370,7 +16370,7 @@ pub fn _mm512_maskz_cvtph_pd(k: __mmask8, a: __m128h) -> __m512d {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2pd, SAE = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_cvt_roundph_pd<const SAE: i32>(a: __m128h) -> __m512d {
static_assert_sae!(SAE);
_mm512_mask_cvt_roundph_pd::<SAE>(_mm512_setzero_pd(), 0xff, a)
@@ -16387,7 +16387,7 @@ pub fn _mm512_cvt_roundph_pd<const SAE: i32>(a: __m128h) -> __m512d {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2pd, SAE = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_mask_cvt_roundph_pd<const SAE: i32>(
src: __m512d,
k: __mmask8,
@@ -16410,7 +16410,7 @@ pub fn _mm512_mask_cvt_roundph_pd<const SAE: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtph2pd, SAE = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm512_maskz_cvt_roundph_pd<const SAE: i32>(k: __mmask8, a: __m128h) -> __m512d {
static_assert_sae!(SAE);
_mm512_mask_cvt_roundph_pd::<SAE>(_mm512_setzero_pd(), k, a)
@@ -16424,7 +16424,7 @@ pub fn _mm512_maskz_cvt_roundph_pd<const SAE: i32>(k: __mmask8, a: __m128h) -> _
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtsh2sd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvtsh_sd(a: __m128d, b: __m128h) -> __m128d {
_mm_mask_cvtsh_sd(a, 0xff, a, b)
}
@@ -16438,7 +16438,7 @@ pub fn _mm_cvtsh_sd(a: __m128d, b: __m128h) -> __m128d {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtsh2sd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cvtsh_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128h) -> __m128d {
unsafe { vcvtsh2sd(a, b, src, k, _MM_FROUND_CUR_DIRECTION) }
}
@@ -16451,7 +16451,7 @@ pub fn _mm_mask_cvtsh_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128h) -> _
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtsh2sd))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cvtsh_sd(k: __mmask8, a: __m128d, b: __m128h) -> __m128d {
_mm_mask_cvtsh_sd(_mm_set_sd(0.0), k, a, b)
}
@@ -16467,7 +16467,7 @@ pub fn _mm_maskz_cvtsh_sd(k: __mmask8, a: __m128d, b: __m128h) -> __m128d {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtsh2sd, SAE = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvt_roundsh_sd<const SAE: i32>(a: __m128d, b: __m128h) -> __m128d {
static_assert_sae!(SAE);
_mm_mask_cvt_roundsh_sd::<SAE>(a, 0xff, a, b)
@@ -16485,7 +16485,7 @@ pub fn _mm_cvt_roundsh_sd<const SAE: i32>(a: __m128d, b: __m128h) -> __m128d {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtsh2sd, SAE = 8))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_mask_cvt_roundsh_sd<const SAE: i32>(
src: __m128d,
k: __mmask8,
@@ -16509,7 +16509,7 @@ pub fn _mm_mask_cvt_roundsh_sd<const SAE: i32>(
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtsh2sd, SAE = 8))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_maskz_cvt_roundsh_sd<const SAE: i32>(k: __mmask8, a: __m128d, b: __m128h) -> __m128d {
static_assert_sae!(SAE);
_mm_mask_cvt_roundsh_sd::<SAE>(_mm_set_sd(0.0), k, a, b)
@@ -16553,7 +16553,7 @@ pub const fn _mm512_cvtsh_h(a: __m512h) -> f16 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi128_si16)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_cvtsi128_si16(a: __m128i) -> i16 {
unsafe { simd_extract!(a.as_i16x8(), 0) }
@@ -16564,7 +16564,7 @@ pub const fn _mm_cvtsi128_si16(a: __m128i) -> i16 {
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi16_si128)
#[inline]
#[target_feature(enable = "avx512fp16")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
pub const fn _mm_cvtsi16_si128(a: i16) -> __m128i {
unsafe { transmute(simd_insert!(i16x8::ZERO, 0, a)) }
@@ -87,7 +87,7 @@ pub unsafe fn _mm256_cvtneebf16_ps(a: *const __m256bh) -> __m256 {
#[inline]
#[target_feature(enable = "avxneconvert")]
#[cfg_attr(test, assert_instr(vcvtneeph2ps))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub unsafe fn _mm_cvtneeph_ps(a: *const __m128h) -> __m128 {
transmute(cvtneeph2ps_128(a))
}
@@ -99,7 +99,7 @@ pub unsafe fn _mm_cvtneeph_ps(a: *const __m128h) -> __m128 {
#[inline]
#[target_feature(enable = "avxneconvert")]
#[cfg_attr(test, assert_instr(vcvtneeph2ps))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub unsafe fn _mm256_cvtneeph_ps(a: *const __m256h) -> __m256 {
transmute(cvtneeph2ps_256(a))
}
@@ -135,7 +135,7 @@ pub unsafe fn _mm256_cvtneobf16_ps(a: *const __m256bh) -> __m256 {
#[inline]
#[target_feature(enable = "avxneconvert")]
#[cfg_attr(test, assert_instr(vcvtneoph2ps))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub unsafe fn _mm_cvtneoph_ps(a: *const __m128h) -> __m128 {
transmute(cvtneoph2ps_128(a))
}
@@ -147,7 +147,7 @@ pub unsafe fn _mm_cvtneoph_ps(a: *const __m128h) -> __m128 {
#[inline]
#[target_feature(enable = "avxneconvert")]
#[cfg_attr(test, assert_instr(vcvtneoph2ps))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub unsafe fn _mm256_cvtneoph_ps(a: *const __m256h) -> __m256 {
transmute(cvtneoph2ps_256(a))
}
@@ -401,7 +401,7 @@
}
types! {
#![stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#![stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
/// 128-bit wide set of 8 `f16` types, x86-specific
///
@@ -768,7 +768,7 @@ pub(crate) const fn $as_from(self) -> $from {
pub use self::avxneconvert::*;
mod avx512fp16;
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub use self::avx512fp16::*;
mod kl;
@@ -10,7 +10,7 @@
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtsi2sh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvti64_sh(a: __m128h, b: i64) -> __m128h {
unsafe { vcvtsi642sh(a, b, _MM_FROUND_CUR_DIRECTION) }
}
@@ -32,7 +32,7 @@ pub fn _mm_cvti64_sh(a: __m128h, b: i64) -> __m128h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtsi2sh, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvt_roundi64_sh<const ROUNDING: i32>(a: __m128h, b: i64) -> __m128h {
unsafe {
static_assert_rounding!(ROUNDING);
@@ -48,7 +48,7 @@ pub fn _mm_cvt_roundi64_sh<const ROUNDING: i32>(a: __m128h, b: i64) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtusi2sh))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvtu64_sh(a: __m128h, b: u64) -> __m128h {
unsafe { vcvtusi642sh(a, b, _MM_FROUND_CUR_DIRECTION) }
}
@@ -70,7 +70,7 @@ pub fn _mm_cvtu64_sh(a: __m128h, b: u64) -> __m128h {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtusi2sh, ROUNDING = 8))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvt_roundu64_sh<const ROUNDING: i32>(a: __m128h, b: u64) -> __m128h {
unsafe {
static_assert_rounding!(ROUNDING);
@@ -85,7 +85,7 @@ pub fn _mm_cvt_roundu64_sh<const ROUNDING: i32>(a: __m128h, b: u64) -> __m128h {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtsh2si))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvtsh_i64(a: __m128h) -> i64 {
unsafe { vcvtsh2si64(a, _MM_FROUND_CUR_DIRECTION) }
}
@@ -106,7 +106,7 @@ pub fn _mm_cvtsh_i64(a: __m128h) -> i64 {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtsh2si, ROUNDING = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvt_roundsh_i64<const ROUNDING: i32>(a: __m128h) -> i64 {
unsafe {
static_assert_rounding!(ROUNDING);
@@ -121,7 +121,7 @@ pub fn _mm_cvt_roundsh_i64<const ROUNDING: i32>(a: __m128h) -> i64 {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtsh2usi))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvtsh_u64(a: __m128h) -> u64 {
unsafe { vcvtsh2usi64(a, _MM_FROUND_CUR_DIRECTION) }
}
@@ -142,7 +142,7 @@ pub fn _mm_cvtsh_u64(a: __m128h) -> u64 {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvtsh2usi, ROUNDING = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvt_roundsh_u64<const ROUNDING: i32>(a: __m128h) -> u64 {
unsafe {
static_assert_rounding!(ROUNDING);
@@ -157,7 +157,7 @@ pub fn _mm_cvt_roundsh_u64<const ROUNDING: i32>(a: __m128h) -> u64 {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttsh2si))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvttsh_i64(a: __m128h) -> i64 {
unsafe { vcvttsh2si64(a, _MM_FROUND_CUR_DIRECTION) }
}
@@ -172,7 +172,7 @@ pub fn _mm_cvttsh_i64(a: __m128h) -> i64 {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttsh2si, SAE = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvtt_roundsh_i64<const SAE: i32>(a: __m128h) -> i64 {
unsafe {
static_assert_sae!(SAE);
@@ -187,7 +187,7 @@ pub fn _mm_cvtt_roundsh_i64<const SAE: i32>(a: __m128h) -> i64 {
#[inline]
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttsh2usi))]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvttsh_u64(a: __m128h) -> u64 {
unsafe { vcvttsh2usi64(a, _MM_FROUND_CUR_DIRECTION) }
}
@@ -202,7 +202,7 @@ pub fn _mm_cvttsh_u64(a: __m128h) -> u64 {
#[target_feature(enable = "avx512fp16")]
#[cfg_attr(test, assert_instr(vcvttsh2usi, SAE = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub fn _mm_cvtt_roundsh_u64<const SAE: i32>(a: __m128h) -> u64 {
unsafe {
static_assert_sae!(SAE);
@@ -75,7 +75,7 @@
pub use self::bt::*;
mod avx512fp16;
#[stable(feature = "stdarch_x86_avx512fp16", since = "CURRENT_RUSTC_VERSION")]
#[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")]
pub use self::avx512fp16::*;
mod amx;
@@ -13,9 +13,9 @@ auto_llvm_sign_conversion: false
neon-stable: &neon-stable
FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]
# #[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
# #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
neon-stable-fp16: &neon-stable-fp16
FnCall: [stable, ['feature = "stdarch_neon_fp16"', 'since = "CURRENT_RUSTC_VERSION"']]
FnCall: [stable, ['feature = "stdarch_neon_fp16"', 'since = "1.94.0"']]
# #[cfg(not(target_arch = "arm64ec"))]
target-not-arm64ec: &target-not-arm64ec
@@ -10,9 +10,9 @@ auto_big_endian: true
neon-stable: &neon-stable
FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]
# #[stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION")]
# #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
neon-stable-fp16: &neon-stable-fp16
FnCall: [stable, ['feature = "stdarch_neon_fp16"', 'since = "CURRENT_RUSTC_VERSION"']]
FnCall: [stable, ['feature = "stdarch_neon_fp16"', 'since = "1.94.0"']]
# #[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))]
neon-cfg-arm-unstable: &neon-cfg-arm-unstable
@@ -55,9 +55,9 @@ neon-target-aarch64-arm64ec: &neon-target-aarch64-arm64ec
neon-not-arm-stable: &neon-not-arm-stable
FnCall: [cfg_attr, [{ FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "neon_intrinsics"', 'since = "1.59.0"']]}]]
# #[cfg_attr(not(target_arch = "arm"), stable(feature = "stdarch_neon_fp16", since = "CURRENT_RUSTC_VERSION"))]
# #[cfg_attr(not(target_arch = "arm"), stable(feature = "stdarch_neon_fp16", since = "1.94.0"))]
neon-not-arm-stable-fp16: &neon-not-arm-stable-fp16
FnCall: [cfg_attr, [{ FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "stdarch_neon_fp16"', 'since = "CURRENT_RUSTC_VERSION"']]}]]
FnCall: [cfg_attr, [{ FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [stable, ['feature = "stdarch_neon_fp16"', 'since = "1.94.0"']]}]]
# #[cfg_attr(all(test, not(target_env = "msvc"))]
msvc-disabled: &msvc-disabled
+1 -1
View File
@@ -73,7 +73,7 @@ pub unsafe fn _Unwind_RaiseException(exception: *mut _Unwind_Exception) -> _Unwi
// corresponds with llvm::WebAssembly::Tag::CPP_EXCEPTION
// in llvm-project/llvm/include/llvm/CodeGen/WasmEHFuncInfo.h
const CPP_EXCEPTION_TAG: i32 = 0;
wasm_throw(CPP_EXCEPTION_TAG, exception.cast())
unsafe { wasm_throw(CPP_EXCEPTION_TAG, exception.cast()) }
}
_ => {
let _ = exception;
+54 -20
View File
@@ -231,25 +231,9 @@ For targets: `armv7-unknown-linux-gnueabihf`
libraries like jemalloc. See the mk/cfg/arm(v7)-unknown-linux-gnueabi{,hf}.mk
file in Rust's source code.
### `aarch64-linux-gnu.defconfig`
For targets: `aarch64-unknown-linux-gnu`
- Path and misc options > Prefix directory = /x-tools/${CT\_TARGET}
- Path and misc options > Use a mirror = ENABLE
- Path and misc options > Base URL = https://ci-mirrors.rust-lang.org/rustc
- Target options > Target Architecture = arm
- Target options > Bitness = 64-bit
- Operating System > Target OS = linux
- Operating System > Linux kernel version = 4.1.49
- Binary utilities > Version of binutils = 2.29.1
- C-library > glibc version = 2.17 -- aarch64 support was introduced in this version
- C compiler > gcc version = 13.2.0
- C compiler > C++ = ENABLE -- to cross compile LLVM
### `i586-linux-gnu.defconfig`
For targets: `i586-unknown-linux-gnu`
For targets: `i586-unknown-linux-gnu`, `i586-unknown-linux-musl` and `i686-unknown-linux-musl`
- Path and misc options > Prefix directory = /x-tools/${CT\_TARGET}
- Target options > Target Architecture = x86
@@ -266,7 +250,7 @@ For targets: `i586-unknown-linux-gnu`
(\*) Compressed debug is enabled by default for gas (assembly) on Linux/x86 targets,
but that makes our `compiler_builtins` incompatible with binutils < 2.32.
### `loongarch64-linux-gnu.defconfig`
### `loongarch64-unknown-linux-gnu.defconfig`
For targets: `loongarch64-unknown-linux-gnu`
@@ -282,7 +266,7 @@ For targets: `loongarch64-unknown-linux-gnu`
- C compiler > gcc version = 14.2.0
- C compiler > C++ = ENABLE -- to cross compile LLVM
### `loongarch64-linux-musl.defconfig`
### `loongarch64-unknown-linux-musl.defconfig`
For targets: `loongarch64-unknown-linux-musl`
@@ -412,6 +396,56 @@ For targets: `powerpc64-unknown-linux-gnu`
(+) These CPU options match the configuration of the toolchains in RHEL6.
### `powerpc64-unknown-linux-musl.defconfig`
For targets: `powerpc64-unknown-linux-musl`
- Path and misc options > Prefix directory = /x-tools/${CT\_TARGET}
- Path and misc options > Use a mirror = ENABLE
- Path and misc options > Base URL = https://ci-mirrors.rust-lang.org/rustc
- Target options > Target Architecture = powerpc
- Target options > Bitness = 64-bit
- Operating System > Target OS = linux
- Operating System > Linux kernel version = 4.19
- Binary utilities > Version of binutils = 2.42
- C-library > musl version = 1.2.5
- C compiler > gcc version = 14.2.0
- C compiler > C++ = ENABLE -- to cross compile LLVM
### `powerpc64le-unknown-linux-gnu.defconfig`
For targets: `powerpc64le-unknown-linux-gnu`
- Path and misc options > Prefix directory = /x-tools/${CT\_TARGET}
- Path and misc options > Use a mirror = ENABLE
- Path and misc options > Base URL = https://ci-mirrors.rust-lang.org/rustc
- Target options > Target Architecture = powerpc
- Target options > Bitness = 64-bit
- Target options > Endianness = Little endian
- Operating System > Target OS = linux
- Operating System > Linux kernel version = 3.10
- Binary utilities > Version of binutils = 2.42
- C-library > glibc version = 2.17
- C compiler > gcc version = 14.2.0
- C compiler > C++ = ENABLE -- to cross compile LLVM
### `powerpc64le-unknown-linux-musl.defconfig`
For targets: `powerpc64le-unknown-linux-musl`
- Path and misc options > Prefix directory = /x-tools/${CT\_TARGET}
- Path and misc options > Use a mirror = ENABLE
- Path and misc options > Base URL = https://ci-mirrors.rust-lang.org/rustc
- Target options > Target Architecture = powerpc
- Target options > Bitness = 64-bit
- Target options > Endianness = Little endian
- Operating System > Target OS = linux
- Operating System > Linux kernel version = 4.19
- Binary utilities > Version of binutils = 2.42
- C-library > musl version = 1.2.5
- C compiler > gcc version = 14.2.0
- C compiler > C++ = ENABLE -- to cross compile LLVM
### `riscv64-unknown-linux-gnu.defconfig`
For targets: `riscv64-unknown-linux-gnu`
@@ -423,7 +457,7 @@ For targets: `riscv64-unknown-linux-gnu`
- Target options > Bitness = 64-bit
- Operating System > Target OS = linux
- Operating System > Linux kernel version = 4.20.17
- Binary utilities > Version of binutils = 2.36.1
- Binary utilities > Version of binutils = 2.40
- C-library > glibc version = 2.29
- C compiler > gcc version = 8.5.0
- C compiler > C++ = ENABLE -- to cross compile LLVM
@@ -3,9 +3,6 @@ FROM ghcr.io/rust-lang/ubuntu:22.04
COPY scripts/cross-apt-packages.sh /scripts/
RUN sh /scripts/cross-apt-packages.sh
COPY scripts/crosstool-ng.sh /scripts/
RUN sh /scripts/crosstool-ng.sh
WORKDIR /build
COPY scripts/musl-toolchain.sh /build/
@@ -14,14 +11,8 @@ RUN CFLAGS="-Wa,--compress-debug-sections=none -Wl,--compress-debug-sections=non
CXXFLAGS="-Wa,--compress-debug-sections=none -Wl,--compress-debug-sections=none" \
bash musl-toolchain.sh aarch64 && rm -rf build
COPY scripts/rustbuild-setup.sh /scripts/
RUN sh /scripts/rustbuild-setup.sh
WORKDIR /tmp
COPY scripts/crosstool-ng-build.sh /scripts/
COPY host-x86_64/dist-arm-linux-musl/arm-linux-musl.defconfig /tmp/crosstool.defconfig
RUN /scripts/crosstool-ng-build.sh
COPY scripts/sccache.sh /scripts/
RUN sh /scripts/sccache.sh
@@ -1,14 +0,0 @@
CT_CONFIG_VERSION="4"
CT_PREFIX_DIR="/x-tools/${CT_TARGET}"
CT_USE_MIRROR=y
CT_MIRROR_BASE_URL="https://ci-mirrors.rust-lang.org/rustc"
CT_ARCH_ARM=y
CT_ARCH_ARCH="armv6"
CT_ARCH_FLOAT_SW=y
CT_KERNEL_LINUX=y
CT_LINUX_V_3_2=y
CT_BINUTILS_V_2_32=y
CT_GLIBC_V_2_17=y
CT_GCC_V_8=y
CT_CC_LANG_CXX=y
CT_MUSL_V_1_2_5=y
@@ -12,5 +12,7 @@ CT_TARGET_LDFLAGS="-mcmodel=medium"
CT_KERNEL_LINUX=y
CT_LINUX_V_5_19=y
CT_GLIBC_V_2_36=y
CT_BINUTILS_V_2_42=y
CT_GCC_V_14=y
CT_CC_GCC_ENABLE_DEFAULT_PIE=y
CT_CC_LANG_CXX=y
@@ -12,6 +12,9 @@ CT_TARGET_LDFLAGS="-mcmodel=medium"
CT_KERNEL_LINUX=y
CT_LINUX_V_5_19=y
CT_LIBC_MUSL=y
CT_MUSL_V_1_2_5=y
CT_BINUTILS_V_2_42=y
CT_GCC_V_14=y
CT_CC_GCC_ENABLE_DEFAULT_PIE=y
CT_CC_LANG_CXX=y
CT_GETTEXT_NEEDED=y
@@ -11,5 +11,7 @@ CT_KERNEL_LINUX=y
CT_LINUX_V_4_19=y
CT_LIBC_MUSL=y
CT_MUSL_V_1_2_5=y
CT_BINUTILS_V_2_42=y
CT_GCC_V_14=y
CT_CC_LANG_CXX=y
CT_GETTEXT_NEEDED=y
@@ -11,5 +11,7 @@ CT_ARCH_ARCH="powerpc64le"
CT_KERNEL_LINUX=y
CT_LINUX_V_3_10=y
CT_GLIBC_V_2_17=y
CT_BINUTILS_V_2_42=y
CT_GCC_V_14=y
CT_CC_LANG_CXX=y
CT_GETTEXT_NEEDED=y
@@ -12,5 +12,7 @@ CT_KERNEL_LINUX=y
CT_LINUX_V_4_19=y
CT_LIBC_MUSL=y
CT_MUSL_V_1_2_5=y
CT_BINUTILS_V_2_42=y
CT_GCC_V_14=y
CT_CC_LANG_CXX=y
CT_GETTEXT_NEEDED=y
@@ -123,7 +123,7 @@ macro_rules! declare_with_version {
("clippy::into_iter_on_array", "array_into_iter"),
#[clippy::version = ""]
("clippy::invalid_atomic_ordering", "invalid_atomic_ordering"),
#[clippy::version = "CURRENT_RUSTC_VERSION"]
#[clippy::version = "1.88.0"]
("clippy::invalid_null_ptr_usage", "invalid_null_arguments"),
#[clippy::version = ""]
("clippy::invalid_ref", "invalid_value"),
@@ -16,7 +16,7 @@ fn main() {
&root_path.join("src/doc/rustc"),
&root_path.join("src/doc/rustdoc"),
],
|path, _is_dir| walk::filter_dirs(path),
|path, _is_dir| filter_dirs(path),
&mut |entry, contents| {
if !contents.contains(VERSION_PLACEHOLDER) {
return;
@@ -27,3 +27,9 @@ fn main() {
},
);
}
fn filter_dirs(path: &std::path::Path) -> bool {
// tidy would skip some paths that we do want to process
let allow = ["library/stdarch"];
walk::filter_dirs(path) && !allow.iter().any(|p| path.ends_with(p))
}
+2 -2
View File
@@ -334,9 +334,9 @@ macro_rules! extra_check {
if js_lint {
if bless {
eprintln!("linting javascript files");
} else {
eprintln!("linting javascript files and applying suggestions");
} else {
eprintln!("linting javascript files");
}
let res = rustdoc_js::lint(outdir, librustdoc_path, tools_path, bless);
if res.is_err() {
@@ -6,6 +6,8 @@ LL | const N: C::M = 4u8;
|
= note: expected associated type `<C as O>::M`
found type `u8`
= note: the associated type `<C as O>::M` is defined as `u8` in the implementation, but the where-bound `C` shadows this definition
see issue #152409 <https://github.com/rust-lang/rust/issues/152409> for more information
help: consider constraining the associated type `<C as O>::M` to `u8`
|
LL | impl<C: O<M = u8>> U<C> for u16 {
@@ -75,6 +75,8 @@ LL | fn make() -> Self::Ty { 0u8 }
found type `u8`
= help: consider constraining the associated type `<A2<T> as Tr>::Ty` to `u8` or calling a method that returns `<A2<T> as Tr>::Ty`
= note: for more information, visit https://doc.rust-lang.org/book/ch19-03-advanced-traits.html
= note: the associated type `<A2<T> as Tr>::Ty` is defined as `u8` in the implementation, but the where-bound `A2<T>` shadows this definition
see issue #152409 <https://github.com/rust-lang/rust/issues/152409> for more information
error[E0308]: mismatched types
--> $DIR/defaults-specialization.rs:44:29
@@ -89,6 +91,8 @@ LL | fn make() -> Self::Ty { true }
|
= note: expected associated type `<B2<T> as Tr>::Ty`
found type `bool`
= note: the associated type `<B2<T> as Tr>::Ty` is defined as `bool` in the implementation, but the where-bound `B2<T>` shadows this definition
see issue #152409 <https://github.com/rust-lang/rust/issues/152409> for more information
error[E0308]: mismatched types
--> $DIR/defaults-specialization.rs:87:32
@@ -121,6 +125,8 @@ help: a method is available that returns `<B<()> as Tr>::Ty`
|
LL | fn make() -> Self::Ty {
| ^^^^^^^^^^^^^^^^^^^^^ consider calling `Tr::make`
= note: the associated type `<B<()> as Tr>::Ty` is defined as `bool` in the implementation, but the where-bound `B<()>` shadows this definition
see issue #152409 <https://github.com/rust-lang/rust/issues/152409> for more information
error[E0308]: mismatched types
--> $DIR/defaults-specialization.rs:89:33
@@ -153,6 +159,8 @@ help: a method is available that returns `<B2<()> as Tr>::Ty`
|
LL | fn make() -> Self::Ty {
| ^^^^^^^^^^^^^^^^^^^^^ consider calling `Tr::make`
= note: the associated type `<B2<()> as Tr>::Ty` is defined as `bool` in the implementation, but the where-bound `B2<()>` shadows this definition
see issue #152409 <https://github.com/rust-lang/rust/issues/152409> for more information
error: aborting due to 9 previous errors; 1 warning emitted
@@ -0,0 +1,17 @@
// Regression test for issue #149910.
// The compiler previously incorrectly claimed that the local param-env bound
// shadowed the global impl, but they are actually the same.
trait Trait {
type Assoc;
}
impl<T> Trait for T {
type Assoc = T;
}
fn foo<T: Trait>(x: T::Assoc) -> u32 {
x //~ ERROR mismatched types
}
fn main() {}
@@ -0,0 +1,18 @@
error[E0308]: mismatched types
--> $DIR/param-env-shadowing-false-positive.rs:14:5
|
LL | fn foo<T: Trait>(x: T::Assoc) -> u32 {
| --- expected `u32` because of return type
LL | x
| ^ expected `u32`, found associated type
|
= note: expected type `u32`
found associated type `<T as Trait>::Assoc`
help: consider constraining the associated type `<T as Trait>::Assoc` to `u32`
|
LL | fn foo<T: Trait<Assoc = u32>>(x: T::Assoc) -> u32 {
| +++++++++++++
error: aborting due to 1 previous error
For more information about this error, try `rustc --explain E0308`.
@@ -0,0 +1,17 @@
// Regression test for issue #149910.
// This ensures that the diagnostics logic handles Generic Associated Types (GATs)
// correctly without crashing (ICE).
trait Trait {
type Assoc<T>;
}
impl<T> Trait for T {
type Assoc<U> = U;
}
fn foo<T: Trait>(x: T::Assoc<T>) -> u32 {
x //~ ERROR mismatched types
}
fn main() {}
@@ -0,0 +1,18 @@
error[E0308]: mismatched types
--> $DIR/param-env-shadowing-gat.rs:14:5
|
LL | fn foo<T: Trait>(x: T::Assoc<T>) -> u32 {
| --- expected `u32` because of return type
LL | x
| ^ expected `u32`, found associated type
|
= note: expected type `u32`
found associated type `<T as Trait>::Assoc<T>`
help: consider constraining the associated type `<T as Trait>::Assoc<T>` to `u32`
|
LL | fn foo<T: Trait<Assoc<T> = u32>>(x: T::Assoc<T>) -> u32 {
| ++++++++++++++++
error: aborting due to 1 previous error
For more information about this error, try `rustc --explain E0308`.
@@ -0,0 +1,17 @@
// Regression test for issue #149910.
// We want to tell the user about param_env shadowing here.
trait Trait {
type Assoc;
}
impl<T> Trait for T {
type Assoc = T;
}
fn foo<T: Trait>(x: T) -> T::Assoc {
x
//~^ ERROR mismatched types
}
fn main() {}
@@ -0,0 +1,22 @@
error[E0308]: mismatched types
--> $DIR/param-env-shadowing-issue-149910.rs:13:5
|
LL | fn foo<T: Trait>(x: T) -> T::Assoc {
| - -------- expected `<T as Trait>::Assoc` because of return type
| |
| found this type parameter
LL | x
| ^ expected associated type, found type parameter `T`
|
= note: expected associated type `<T as Trait>::Assoc`
found type parameter `T`
= note: the associated type `<T as Trait>::Assoc` is defined as `T` in the implementation, but the where-bound `T` shadows this definition
see issue #152409 <https://github.com/rust-lang/rust/issues/152409> for more information
help: consider further restricting this bound
|
LL | fn foo<T: Trait<Assoc = T>>(x: T) -> T::Assoc {
| +++++++++++
error: aborting due to 1 previous error
For more information about this error, try `rustc --explain E0308`.
@@ -0,0 +1,16 @@
// Regression test for https://github.com/rust-lang/rust/issues/141738
//
// Using a struct constructor as an array repeat count with
// `min_generic_const_args` used to ICE with "unexpected `DefKind`
// for const alias to resolve to: Ctor(Struct, Const)".
// It should now produce a proper type error.
#![feature(min_generic_const_args)]
//~^ WARN the feature `min_generic_const_args` is incomplete
struct S;
fn main() {
let _b = [0; S];
//~^ ERROR the constant `S` is not of type `usize`
}
@@ -0,0 +1,19 @@
warning: the feature `min_generic_const_args` is incomplete and may not be safe to use and/or cause compiler crashes
--> $DIR/struct-ctor-in-array-len.rs:8:12
|
LL | #![feature(min_generic_const_args)]
| ^^^^^^^^^^^^^^^^^^^^^^
|
= note: see issue #132980 <https://github.com/rust-lang/rust/issues/132980> for more information
= note: `#[warn(incomplete_features)]` on by default
error: the constant `S` is not of type `usize`
--> $DIR/struct-ctor-in-array-len.rs:14:14
|
LL | let _b = [0; S];
| ^^^^^^ expected `usize`, found `S`
|
= note: the length of array `[{integer}; S]` must be type `usize`
error: aborting due to 1 previous error; 1 warning emitted
@@ -0,0 +1,7 @@
// Check that `write!` without a destination gives a helpful error message.
// See https://github.com/rust-lang/rust/issues/152493
fn main() {
write!("S");
//~^ ERROR requires a destination and format arguments
}
@@ -0,0 +1,8 @@
error: requires a destination and format arguments, like `write!(dest, "format string", args...)`
--> $DIR/write-missing-destination.rs:5:5
|
LL | write!("S");
| ^^^^^^^^^^^
error: aborting due to 1 previous error
@@ -21,6 +21,8 @@ LL | ()
found unit type `()`
= help: consider constraining the associated type `<T as Foo>::Assoc` to `()` or calling a method that returns `<T as Foo>::Assoc`
= note: for more information, visit https://doc.rust-lang.org/book/ch19-03-advanced-traits.html
= note: the associated type `<T as Foo>::Assoc` is defined as `()` in the implementation, but the where-bound `T` shadows this definition
see issue #152409 <https://github.com/rust-lang/rust/issues/152409> for more information
error[E0308]: mismatched types
--> $DIR/specialization-default-projection.rs:32:5
@@ -37,6 +39,8 @@ LL | generic::<()>()
found associated type `<() as Foo>::Assoc`
= help: consider constraining the associated type `<() as Foo>::Assoc` to `()`
= note: for more information, visit https://doc.rust-lang.org/book/ch19-03-advanced-traits.html
= note: the associated type `<() as Foo>::Assoc` is defined as `()` in the implementation, but the where-bound `()` shadows this definition
see issue #152409 <https://github.com/rust-lang/rust/issues/152409> for more information
error: aborting due to 2 previous errors; 1 warning emitted
@@ -21,6 +21,8 @@ LL | ()
found unit type `()`
= help: consider constraining the associated type `<T as Foo>::Assoc` to `()` or calling a method that returns `<T as Foo>::Assoc`
= note: for more information, visit https://doc.rust-lang.org/book/ch19-03-advanced-traits.html
= note: the associated type `<T as Foo>::Assoc` is defined as `()` in the implementation, but the where-bound `T` shadows this definition
see issue #152409 <https://github.com/rust-lang/rust/issues/152409> for more information
error[E0308]: mismatched types
--> $DIR/specialization-default-projection.rs:32:5
@@ -37,6 +39,8 @@ LL | generic::<()>()
found associated type `<() as Foo>::Assoc`
= help: consider constraining the associated type `<() as Foo>::Assoc` to `()`
= note: for more information, visit https://doc.rust-lang.org/book/ch19-03-advanced-traits.html
= note: the associated type `<() as Foo>::Assoc` is defined as `()` in the implementation, but the where-bound `()` shadows this definition
see issue #152409 <https://github.com/rust-lang/rust/issues/152409> for more information
error: aborting due to 2 previous errors; 1 warning emitted
@@ -20,6 +20,8 @@ LL | Box::new(self)
|
= note: expected associated type `<T as Example>::Output`
found struct `Box<T>`
= note: the associated type `<T as Example>::Output` is defined as `Box<T>` in the implementation, but the where-bound `T` shadows this definition
see issue #152409 <https://github.com/rust-lang/rust/issues/152409> for more information
error[E0308]: mismatched types
--> $DIR/specialization-default-types.rs:29:5
@@ -33,6 +35,8 @@ LL | Example::generate(t)
found associated type `<T as Example>::Output`
= help: consider constraining the associated type `<T as Example>::Output` to `Box<T>`
= note: for more information, visit https://doc.rust-lang.org/book/ch19-03-advanced-traits.html
= note: the associated type `<T as Example>::Output` is defined as `Box<T>` in the implementation, but the where-bound `T` shadows this definition
see issue #152409 <https://github.com/rust-lang/rust/issues/152409> for more information
error: aborting due to 2 previous errors; 1 warning emitted
@@ -20,6 +20,8 @@ LL | Box::new(self)
|
= note: expected associated type `<T as Example>::Output`
found struct `Box<T>`
= note: the associated type `<T as Example>::Output` is defined as `Box<T>` in the implementation, but the where-bound `T` shadows this definition
see issue #152409 <https://github.com/rust-lang/rust/issues/152409> for more information
error[E0308]: mismatched types
--> $DIR/specialization-default-types.rs:29:5
@@ -33,6 +35,8 @@ LL | Example::generate(t)
found associated type `<T as Example>::Output`
= help: consider constraining the associated type `<T as Example>::Output` to `Box<T>`
= note: for more information, visit https://doc.rust-lang.org/book/ch19-03-advanced-traits.html
= note: the associated type `<T as Example>::Output` is defined as `Box<T>` in the implementation, but the where-bound `T` shadows this definition
see issue #152409 <https://github.com/rust-lang/rust/issues/152409> for more information
error: aborting due to 2 previous errors; 1 warning emitted